FER Model Training

Importing Libraries

In [1]:
! pip install scikeras
Collecting scikeras
  Downloading scikeras-0.7.0-py3-none-any.whl (27 kB)
Requirement already satisfied: importlib-metadata>=3 in /usr/local/lib/python3.7/dist-packages (from scikeras) (4.11.3)
Requirement already satisfied: packaging<22.0,>=0.21 in /usr/local/lib/python3.7/dist-packages (from scikeras) (21.3)
Requirement already satisfied: scikit-learn>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from scikeras) (1.0.2)
Requirement already satisfied: typing-extensions>=3.6.4 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=3->scikeras) (4.2.0)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=3->scikeras) (3.8.0)
Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging<22.0,>=0.21->scikeras) (3.0.8)
Requirement already satisfied: scipy>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=1.0.0->scikeras) (1.4.1)
Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=1.0.0->scikeras) (3.1.0)
Requirement already satisfied: numpy>=1.14.6 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=1.0.0->scikeras) (1.21.6)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=1.0.0->scikeras) (1.1.0)
Installing collected packages: scikeras
Successfully installed scikeras-0.7.0
In [2]:
import tensorflow as tf
import numpy as np
import random
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

from tensorflow import keras
from tensorflow.keras import models, layers, optimizers, regularizers
from tensorflow.keras.utils import to_categorical, plot_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from imblearn.over_sampling import RandomOverSampler
from sklearn.model_selection import train_test_split, GridSearchCV
from scikeras.wrappers import KerasClassifier
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, f1_score, classification_report
from prettytable import PrettyTable
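
The plotting helpers in the next section reference a decoded_emotions dictionary and a df_explore frame that are defined elsewhere in the notebook. A minimal sketch of both, assuming the standard FER-2013 label order and that df_explore is simply a copy of the loaded dataframe:

decoded_emotions = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy',
                    4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
df_explore = df.copy() # assumption: any frame with 'emotion' and 'pixels' columns works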

Helper Functions

In [3]:
def load_fer_dataset(df):
  width, height = 48, 48
  x, y = df['pixels'], df['emotion'] # pixel strings are the features, emotion labels the targets
  oversampler = RandomOverSampler(sampling_strategy='auto') # oversample the minority classes to fix the imbalance problem

  x, y = oversampler.fit_resample(x.values.reshape(-1, 1), y)
  x = pd.Series(x.flatten())
  x = np.array(list(map(str.split, x)), 'float32') / 255 # parse the space-separated pixel strings and normalise to [0, 1]
  x = x.reshape(-1, width, height, 1)

  y = np.array(y)
  y = y.reshape(y.shape[0], 1)

  return train_test_split(x, y, test_size=0.2, random_state=45) # 80/20 train/test split
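A usage sketch for the loader (the variable names are assumptions; the notebook also carves a validation split off the training data further on):

x_train, x_test, y_train, y_test = load_fer_dataset(df)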
In [4]:
def preprocess_data(y_train, y_test, y_val, num_classes):
  return to_categorical(y_train, num_classes), to_categorical(y_test, num_classes), to_categorical(y_val, num_classes) # one hot encoding the training set, test set and validation set
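For example, with num_classes=7 the integer label 3 is encoded as a 7-element one-hot vector:

to_categorical([3], 7) # array([[0., 0., 0., 1., 0., 0., 0.]], dtype=float32)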
In [5]:
def plot_all_emotions():
  fig = plt.figure(1, (12, 12))
  fig.suptitle("Images representing each emotion", fontsize=20, weight='bold')
  num_emotions = len(df_explore.emotion.unique())
  img_count = 0

  for num_emotion in sorted(df_explore.emotion.unique()):
      for sample in range(num_emotions): # one row of sample images per emotion
          pixel = df_explore[df_explore.emotion == num_emotion].pixels.iloc[sample]
          pixel = np.array(pixel.split(' ')).reshape(48, 48).astype('float32')
          img_count += 1

          axis = plt.subplot(num_emotions, num_emotions, img_count)
          axis.imshow(pixel, cmap='gray')
          axis.axis('off')
          axis.set_title(decoded_emotions[num_emotion], fontsize=12)
  return plt.show()
In [6]:
def plot_one_emotion(emotion, location):
  pixel = df_explore[df_explore.emotion == emotion].pixels.iloc[location]
  pixel = np.array(pixel.split(' ')).reshape(48, 48).astype('float32')
  plt.title(decoded_emotions[emotion], fontsize=12, weight='bold')
  return plt.imshow(pixel, cmap='gray')
In [47]:
def plot_augmentation_samples(train_datagen):
  fig = plt.figure(figsize=(25, 10))
  fig.suptitle("Data Augmentation Samples", fontsize=20, weight='bold')
  rows = 3
  columns = 10
  iterator = train_datagen.flow(x_train, batch_size=64)

  for i in range(rows * columns):
    fig.add_subplot(rows, columns, i + 1)
    plt.grid(False)
    batch = next(iterator) # draw a fresh augmented batch and display its first image
    plt.imshow(np.squeeze(batch[0]), cmap=plt.cm.gray)
    plt.xticks([])
    plt.yticks([])
  save_figure("Data Augmentation Samples", tight_layout=False)
  plt.show()
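The train_datagen argument is passed in from outside this helper; a plausible ImageDataGenerator configuration for 48x48 face crops (a sketch, not necessarily the notebook's exact settings):

train_datagen = ImageDataGenerator(rotation_range=10,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   zoom_range=0.1,
                                   horizontal_flip=True)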
In [7]:
# helper function for training the model i.e. running for epochs
def train_model(model, epoch, batch):  
  callbacks_list = [
                    EarlyStopping( # if the model doesn't improve its validation accuracy for 3 consecutive epochs, it will stop training
                        monitor='val_accuracy',
                        patience=3, # the number of epochs with no improvement after which training is terminated
                        verbose=1,
                        restore_best_weights=True
                        ),
                    ModelCheckpoint( # whenever the model reaches a new best validation loss, it is saved to disk so it can be loaded later
                        filepath='best-model.h5',
                        verbose=1,
                        monitor='val_loss',
                        save_best_only=True,
                        ),
                    ReduceLROnPlateau( # if the validation loss plateaus, reduce the learning rate by a factor of sqrt(0.1)
                        monitor='val_loss',
                        factor=np.sqrt(0.1),
                        patience=3,
                        verbose=1,
                        min_delta=0.0001
                        )
                    ]

  return model.fit(x_train, 
                   y_train,
                   epochs = epoch,
                   batch_size = batch,
                   callbacks=callbacks_list,
                   validation_data = (x_val, y_val))
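A usage sketch, assuming a compiled model and the x_train/y_train/x_val/y_val arrays are in scope (the epoch and batch values here are assumptions):

history = train_model(model, epoch=50, batch=64)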
In [8]:
# helper function for retrieving a data augmentation generator
def get_augmented_generator(train_datagen, batch):
  return train_datagen.flow(x_train, y_train, batch_size=batch)
In [9]:
# helper function for training the model with the data augmentation generator
def train_augmented_model(model, train_datagen, epoch, batch):  
  callbacks_list = [
                    EarlyStopping( # if the model doesn't improve its validation accuracy for 3 consecutive epochs, it will stop training
                        monitor='val_accuracy',
                        patience=3, # the number of epochs with no improvement after which training is terminated
                        verbose=1,
                        restore_best_weights=True
                        ),
                    ModelCheckpoint( # whenever the model reaches a new best validation loss, it is saved to disk so it can be loaded later
                        filepath='best-model.h5',
                        verbose=1,
                        monitor='val_loss',
                        save_best_only=True,
                        ),
                    ReduceLROnPlateau( # if the validation loss plateaus, reduce the learning rate by a factor of 0.2
                        monitor='val_loss',
                        factor=0.2,
                        patience=3,
                        verbose=1,
                        min_delta=0.0001
                        ),
                    ]

  return model.fit(get_augmented_generator(train_datagen, batch),
                   epochs = epoch,
                   steps_per_epoch = x_train.shape[0] // batch,
                   validation_data = (x_val, y_val),
                   callbacks=callbacks_list,
                   validation_steps = len(x_val) // batch)
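The augmented variant is invoked the same way, with the generator passed in (again, the epoch and batch values are assumptions):

history_augmented = train_augmented_model(model, train_datagen, epoch=50, batch=64)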
In [10]:
# retrieves all history keys of the model
def get_history_keys(history):
  return (history.history,
          history.history["loss"],
          history.history["val_loss"], 
          history.history["accuracy"],
          history.history["val_accuracy"])

# plots both the model loss and accuracy
def plot_model_history(history, name=""):
  history_dict, loss, val_loss, acc, val_acc = get_history_keys(history) # get histories
  epochs = range(1, len(loss) + 1)
  blue_dots = 'bo'
  solid_blue_line = 'b'

  # defining the history subplots 
  values, axis = plt.subplots(1, 2)
  values.suptitle(name, fontsize=14)
  values.set_size_inches(14, 6)

  # plotting the values on the axis for each subplot
  axis[0].plot(epochs, loss, blue_dots, label = 'Training loss')
  axis[0].plot(epochs, val_loss, solid_blue_line, label = 'Validation loss')
  axis[1].plot(epochs, acc, blue_dots, label = 'Training acc')
  axis[1].plot(epochs, val_acc, solid_blue_line, label = 'Validation acc')

  # defining the labels
  plt.setp(axis[0], xlabel='Epochs')
  plt.setp(axis[0], ylabel='Loss')
  plt.setp(axis[1], xlabel='Epochs')
  plt.setp(axis[1], ylabel='Accuracy')
  axis[0].set_title('Training and validation loss')
  axis[0].legend()
  axis[1].set_title('Training and validation acc')
  axis[1].legend()

  # printing out minimum/maximum validation loss and accuracy
  print(f"\nMin validation loss: {min(val_loss)} \nMax validation loss: {max(val_loss)} \nMin validation acc: {min(val_acc)} \nMax validation acc: {max(val_acc)}")
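A usage sketch with a hypothetical run name:

plot_model_history(history, name="Baseline CNN")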
In [11]:
# retrieves all history keys from the two models
def get_comparison_history_keys(history1, history2):
  return (history1.history,
          history2.history,
          history1.history["val_loss"], 
          history2.history["val_loss"], 
          history1.history["val_accuracy"],
          history2.history["val_accuracy"])


def compare_model_history(history1, history2, name="", key1="", key2=""):
  history_dict1, history_dict2, model_1_val_loss, model_2_val_loss, model_1_val_acc, model_2_val_acc = get_comparison_history_keys(history1, history2)
  epochs = range(1, len(model_1_val_loss) + 1)

  # defining the history subplots 
  values, axis = plt.subplots(1, 2)
  values.suptitle(name, fontsize=14)
  values.set_size_inches(14, 6)

  # plotting the values on the axis for each subplot
  axis[0].plot(epochs, model_1_val_loss, 'g', label = key1 + ' Loss')
  axis[0].plot(epochs, model_2_val_loss, 'b', label = key2 + ' Loss')
  axis[1].plot(epochs, model_1_val_acc, 'g', label = key1 + ' Accuracy')
  axis[1].plot(epochs, model_2_val_acc, 'b', label = key2 + ' Accuracy')

  # defining the labels
  plt.setp(axis[0], xlabel='Epochs')
  plt.setp(axis[0], ylabel='Loss')
  plt.setp(axis[1], xlabel='Epochs')
  plt.setp(axis[1], ylabel='Accuracy')
  axis[0].set_title('Validation Loss')
  axis[0].legend()
  axis[1].set_title('Validation Accuracy')
  axis[1].legend()

  # printing out the best validation loss and accuracy for each model
  print(f"\n{key1} best validation loss: {min(model_1_val_loss)} \n{key2} best validation loss: {min(model_2_val_loss)}")
  print(f"\n{key1} best validation accuracy: {max(model_1_val_acc)} \n{key2} best validation accuracy: {max(model_2_val_acc)}")
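A usage sketch with hypothetical labels; note that both runs must span the same number of epochs, since the shared x-axis is derived from history1:

compare_model_history(history, history_augmented, name="Plain vs Augmented", key1="Plain", key2="Augmented")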
In [12]:
# helper function to print the grid search results
def plot_grid_search_results(results):
    print(f"\nBest score = {'%.2f' % results.best_score_} using {results.best_params_}\n")
    mean_score = results.cv_results_['mean_test_score']
    standard_deviation = results.cv_results_['std_test_score']
    parameters = results.cv_results_['params']
    optimised_model = results.best_estimator_ # the refitted best estimator from the passed-in search
    accuracy = optimised_model.score(x_test, y_test)

    for mean, stdev, param in zip(mean_score, standard_deviation, parameters):
        print('mean test accuracy +/- std = {:.4f} +/- {:.4f} with: {}'.format(mean, stdev, param))
    print("\nAccuracy achieved on the best model")
    print("{:.2f}%".format(accuracy * 100))
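This helper expects a fitted GridSearchCV object. A sketch of how the KerasClassifier and GridSearchCV imports above wire together (get_model, the grid values, and grid_searcher_result are illustrative assumptions, not the notebook's exact setup):

def get_model(): # hypothetical builder returning a small compiled model
    model = models.Sequential([
        layers.Flatten(input_shape=(48, 48, 1)),
        layers.Dense(7, activation='softmax')
    ])
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model

classifier = KerasClassifier(model=get_model, epochs=10, batch_size=64, verbose=0)
grid = {'batch_size': [32, 64], 'epochs': [10, 20]}
grid_searcher = GridSearchCV(estimator=classifier, param_grid=grid, cv=3)
grid_searcher_result = grid_searcher.fit(x_train, y_train)
plot_grid_search_results(grid_searcher_result)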
In [13]:
def save_figure(figure_name, tight_layout=True, fig_extension="png", resolution=300):
    path = os.path.join(".", figure_name + "." + fig_extension)
    print(figure_name, "has been saved")
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format=fig_extension, dpi=resolution)

Loading and Inspecting the Data

In [14]:
df = pd.read_csv('/content/drive/MyDrive/Full-FER-2013-Dataset/fer2013.csv')
df.head()
Out[14]:
emotion pixels Usage
0 0 70 80 82 72 58 58 60 63 54 58 60 48 89 115 121... Training
1 0 151 150 147 155 148 133 111 140 170 174 182 15... Training
2 2 231 212 156 164 174 138 161 173 182 200 106 38... Training
3 4 24 32 36 30 32 23 19 20 30 41 21 22 32 34 21 1... Training
4 6 4 0 0 0 0 0 0 0 0 0 0 0 3 15 23 28 48 50 58 84... Training
In [ ]:
df.tail()
Out[ ]:
emotion pixels Usage
35882 6 50 36 17 22 23 29 33 39 34 37 37 37 39 43 48 5... PrivateTest
35883 3 178 174 172 173 181 188 191 194 196 199 200 20... PrivateTest
35884 0 17 17 16 23 28 22 19 17 25 26 20 24 31 19 27 9... PrivateTest
35885 3 30 28 28 29 31 30 42 68 79 81 77 67 67 71 63 6... PrivateTest
35886 2 19 13 14 12 13 16 21 33 50 57 71 84 97 108 122... PrivateTest
In [ ]:
df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 35887 entries, 0 to 35886
Data columns (total 3 columns):
 #   Column   Non-Null Count  Dtype 
---  ------   --------------  ----- 
 0   emotion  35887 non-null  int64 
 1   pixels   35887 non-null  object
 2   Usage    35887 non-null  object
dtypes: int64(1), object(2)
memory usage: 841.2+ KB
In [ ]:
df.describe()
Out[ ]:
emotion
count 35887.000000
mean 3.323265
std 1.873819
min 0.000000
25% 2.000000
50% 3.000000
75% 5.000000
max 6.000000
In [ ]:
df['emotion'].value_counts()
Out[ ]:
3    8989
6    6198
4    6077
2    5121
0    4953
5    4002
1     547
Name: emotion, dtype: int64
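These counts expose the class imbalance that motivates the RandomOverSampler in load_fer_dataset: label 1 (Disgust, under the standard FER-2013 labelling) has only 547 samples against 8989 for label 3 (Happy). A quick visual check using the already-imported seaborn (a sketch):

sns.countplot(x='emotion', data=df)
plt.show()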
In [ ]:
df['pixels'].value_counts()
Out[ ]:
<output truncated: each row of this value_counts output is a full 2304-value pixel string followed by its count. An all-zero (blank) image appears 12 times, and several other duplicated images appear 7 times each, confirming that the dataset contains duplicate images.>
Name: pixels, dtype: int64
                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                   
                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                            ..
44 47 43 37 32 46 34 25 19 24 49 113 180 196 216 223 220 221 216 213 213 215 211 213 205 195 180 162 132 93 63 51 35 41 41 30 15 9 24 24 29 30 34 26 18 31 46 47 47 50 43 34 35 39 30 22 22 38 64 145 190 193 207 210 206 214 216 212 213 213 213 213 208 200 187 175 150 113 76 55 35 40 40 29 25 11 18 30 21 28 26 30 20 24 43 47 37 50 37 30 36 28 23 16 26 49 77 174 195 194 202 208 210 214 212 211 211 213 215 213 209 205 195 182 162 134 97 66 43 29 37 28 27 20 8 26 21 24 22 34 23 19 34 48 27 38 27 29 29 17 14 13 36 48 106 190 193 196 205 202 208 208 209 209 210 213 213 217 214 209 198 188 176 160 125 87 61 33 33 27 19 22 10 9 34 22 24 36 24 15 31 46 27 27 19 21 22 13 11 15 47 55 147 188 192 194 204 202 205 206 213 212 214 216 210 213 217 212 203 194 185 173 153 113 81 45 28 27 16 13 14 3 26 22 15 28 29 14 23 43 25 17 14 21 21 12 10 24 56 73 165 184 189 192 205 208 204 211 215 216 216 216 213 215 215 213 209 203 193 182 171 144 101 55 25 28 15 10 16 10 10 20 13 20 30 15 14 32 18 13 13 19 14 8 11 44 58 86 169 184 195 195 204 207 207 213 215 216 213 213 217 217 214 214 209 207 200 187 176 160 112 41 14 23 18 10 10 14 4 13 14 13 24 19 12 27 13 9 12 15 7 5 17 62 60 107 169 180 193 195 205 203 207 211 216 218 215 213 214 216 215 213 211 209 203 199 185 148 85 54 38 21 16 12 10 11 3 9 12 10 18 20 13 26 8 9 10 13 6 4 25 59 60 129 171 177 188 191 200 201 209 212 214 215 214 216 214 217 217 213 212 210 209 190 160 86 74 129 107 39 16 12 10 9 8 4 11 14 14 12 12 20 6 6 9 6 4 8 31 45 61 142 171 173 186 193 204 204 213 218 218 213 211 214 211 214 213 208 206 205 169 112 84 84 152 180 146 68 22 11 11 7 9 8 6 11 5 10 13 14 5 6 9 3 6 13 34 33 77 153 166 175 183 190 195 203 207 214 214 210 208 206 207 209 208 203 192 168 102 64 106 160 152 119 107 66 29 12 11 12 10 9 4 7 3 13 9 7 5 7 6 3 8 18 33 42 102 157 165 177 184 190 189 199 202 203 208 208 205 198 199 202 200 196 168 134 96 110 113 66 42 44 39 24 27 27 6 12 10 4 5 11 1 13 3 6 5 6 5 2 9 20 27 50 128 162 168 176 183 189 187 196 198 197 198 205 204 196 187 194 196 188 150 133 116 67 35 92 74 123 127 51 32 44 21 8 11 6 3 5 3 7 3 2 3 3 3 3 12 17 24 71 143 168 171 181 189 190 185 193 190 193 191 194 196 191 189 192 195 181 157 118 80 68 61 109 94 137 142 101 95 60 38 7 13 8 2 5 3 7 8 2 1 2 2 4 11 14 23 82 133 149 150 147 139 134 121 142 161 170 182 188 189 189 199 199 198 168 128 116 138 134 92 78 101 128 105 133 166 114 50 16 8 9 2 3 4 8 8 3 1 1 2 5 13 15 22 53 69 74 66 58 55 53 61 72 109 138 170 181 188 200 209 207 200 163 136 134 120 150 136 115 124 130 152 172 177 158 104 34 15 5 5 4 4 5 7 2 2 1 2 7 16 14 38 84 99 109 113 104 97 83 96 101 112 124 144 164 182 202 215 210 203 183 165 154 151 151 157 151 158 180 181 170 179 180 166 71 24 7 5 5 5 4 7 2 2 3 3 8 14 28 83 150 175 184 164 121 115 101 94 109 120 99 76 109 158 186 207 214 205 187 184 189 166 156 164 170 168 177 180 177 186 192 182 125 42 19 3 5 4 5 8 3 2 4 4 11 13 40 104 144 121 74 51 58 85 133 112 109 126 119 76 70 121 167 190 209 207 188 177 182 176 176 179 177 184 194 192 194 196 189 186 166 58 44 3 4 3 4 12 7 3 5 7 10 11 57 106 64 26 31 36 108 132 217 205 146 132 157 142 85 99 138 177 196 204 191 182 189 192 195 198 197 200 206 209 209 202 186 182 180 85 93 23 3 4 2 10 9 6 7 10 7 22 70 51 30 64 68 65 64 139 189 158 158 169 184 148 110 113 116 159 185 197 197 183 185 191 199 209 213 212 213 218 214 206 197 172 178 144 110 83 1 7 2 14 11 8 9 8 3 31 62 30 55 95 79 68 91 139 133 146 181 177 174 132 130 127 106 133 176 196 201 185 179 186 191 202 215 216 217 222 218 207 205 184 164 159 99 120 10 1 3 15 16 
21 12 7 8 42 59 59 67 81 88 83 99 119 147 174 178 184 175 135 152 130 105 118 165 195 205 194 185 188 188 195 212 221 224 224 222 220 208 199 181 158 130 116 61 1 2 11 15 18 11 2 43 98 77 88 108 123 134 133 151 178 180 185 186 186 173 164 161 137 115 119 148 192 210 195 189 188 191 193 204 220 224 226 223 219 212 197 194 180 161 122 97 17 1 6 17 11 12 16 97 132 107 117 118 141 156 165 174 178 182 183 190 187 186 173 146 143 134 123 137 180 212 202 191 190 187 186 197 203 210 215 216 210 204 204 197 192 168 152 124 44 0 6 15 14 20 56 122 139 145 151 149 149 160 175 186 193 197 193 204 202 196 166 141 149 145 132 135 160 193 208 192 186 185 177 177 176 185 195 201 202 202 201 197 192 176 168 160 62 3 12 12 30 33 89 131 147 162 161 168 173 181 191 199 206 206 210 216 210 201 171 146 148 140 139 142 160 184 206 205 191 186 188 180 165 162 174 184 188 192 190 187 188 181 175 186 70 1 12 12 39 57 112 134 152 161 162 169 177 186 186 198 206 206 213 215 218 210 186 143 123 116 126 139 158 177 201 215 200 184 187 196 188 173 158 154 169 177 180 179 186 186 185 187 69 5 19 15 31 73 114 132 145 153 157 168 176 181 182 196 204 205 212 219 219 210 194 142 107 115 114 127 154 174 195 206 200 188 184 190 194 196 181 157 141 150 167 176 182 189 189 183 63 7 19 22 84 114 96 121 135 145 151 162 173 180 182 191 201 206 215 219 210 195 185 133 109 134 132 127 143 170 186 209 223 194 181 182 187 190 189 186 172 150 160 178 179 190 190 173 45 10 16 25 163 205 159 109 134 141 149 156 166 176 184 187 194 202 209 207 199 191 181 124 109 141 146 126 135 161 181 200 212 194 165 176 192 194 194 191 183 166 150 179 187 191 189 158 26 14 14 22 204 212 209 122 119 138 146 155 165 173 179 180 188 196 203 202 190 184 180 119 106 139 144 136 127 139 163 178 178 167 166 187 196 196 195 187 151 169 163 180 194 184 189 129 7 14 9 19 197 206 210 163 101 135 141 150 157 160 167 178 184 190 194 192 187 186 173 128 94 108 112 116 102 99 111 132 143 163 190 194 197 196 201 165 145 178 185 189 196 181 188 96 4 12 7 18 210 211 210 209 129 118 137 141 153 157 164 172 175 179 187 187 180 167 154 139 112 97 93 83 82 86 102 120 155 187 194 196 200 190 142 144 185 178 190 196 194 185 180 56 3 13 8 16 216 217 215 213 185 107 127 128 139 150 156 161 168 172 181 171 160 153 147 146 145 140 137 133 136 149 166 175 190 201 198 186 163 155 110 158 192 185 196 202 193 190 161 24 6 11 8 11 220 220 222 213 208 134 112 119 125 135 142 154 159 162 158 156 163 158 152 153 154 158 156 167 176 192 200 202 202 187 164 136 167 187 138 178 202 196 196 201 190 197 130 6 7 10 10 5 223 223 222 204 214 178 107 120 117 125 134 146 150 153 149 156 160 160 157 157 158 160 165 170 165 168 171 169 173 141 139 201 211 167 147 183 204 200 197 196 186 194 97 6 7 8 9 4 220 214 202 181 205 206 119 118 118 123 138 144 152 157 159 145 119 120 132 123 121 121 120 118 105 121 119 119 143 153 200 221 167 146 150 189 204 202 201 194 189 185 62 4 4 8 9 2 214 202 174 175 195 217 144 114 122 129 143 153 168 176 182 158 107 67 43 41 61 82 119 125 149 126 131 166 180 216 201 150 159 147 163 199 201 204 205 194 194 162 19 5 2 10 10 2 204 177 154 173 202 220 157 102 121 127 144 155 173 188 189 185 179 150 114 93 87 100 148 153 196 171 186 209 185 162 162 168 156 143 187 204 201 206 204 191 200 115 0 8 2 13 12 5 177 110 134 179 204 214 169 97 112 116 134 153 170 185 189 179 178 163 152 137 120 103 107 125 132 137 150 169 182 188 208 162 137 172 197 199 201 209 204 191 193 64 0 7 3 16 12 6 106 91 153 180 192 209 164 86 111 111 128 150 165 187 197 176 155 163 157 137 120 106 89 103 
130 150 151 177 182 160 138 130 163 193 198 196 206 209 201 193 168 20 1 4 4 13 13 8 28 75 123 163 185 203 163 79 107 121 130 144 168 188 199 192 155 147 161 159 139 113 103 93 89 104 109 116 122 135 144 162 187 193 195 202 209 211 195 198 109 0 5 2 7 13 10 7 33 42 85 130 167 201 157 69 96 117 131 147 167 184 192 189 173 153 151 159 161 145 132 128 126 127 128 134 145 160 169 184 196 198 204 209 210 207 192 182 65 0 4 3 12 13 11 7 36 66 114 157 194 215 110 51 84 103 125 141 158 174 184 176 161 165 165 157 161 161 157 152 152 152 157 161 169 182 199 206 207 208 209 210 209 204 187 144 53 3 4 6 15 13 12 9 39 105 158 193 207 199 53 31 69 87 105 126 144 158 172 173 147 144 170 171 161 164 166 172 176 181 186 194 202 208 212 207 202 203 208 206 206 202 170 110 61 11 5 10 12 11 14 11 41 110 164 196 208 183 41 10 38 66 87 106 127 139 154 164 140 124 148 174 173 172 175 178 187 191 192 200 208 210 212 211 207 205 209 207 204 194 141 107 97 22 11 14 8 10 13 11 42 100 146 192 208 163 35 20 19 32 66 88 106 121 133 144 130 108 127 158 170 177 180 179 183 179 184 197 205 211 212 213 209 207 208 204 201 179 124 130 115 32 16 15 9 12 15 15                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                         1
17 10 31 28 45 92 96 101 107 116 127 144 152 159 171 181 187 190 192 194 196 200 207 210 208 209 210 205 211 209 209 212 203 197 190 184 179 178 175 166 162 153 135 89 54 23 29 23 10 24 22 30 63 99 98 103 114 118 134 151 155 159 170 177 183 190 194 197 199 205 207 209 209 209 210 211 213 212 212 211 207 195 189 185 179 179 175 169 164 159 149 116 68 45 33 25 15 33 18 38 76 104 99 101 118 122 133 147 159 162 167 172 183 190 195 201 202 204 206 209 211 208 208 211 207 208 211 210 205 194 189 185 179 177 174 171 165 156 154 138 89 52 48 23 18 33 21 39 80 105 101 100 116 121 133 144 162 166 169 175 185 193 197 202 203 203 203 203 202 203 204 204 201 203 207 208 202 195 190 184 177 174 172 169 161 156 158 143 109 56 43 28 15 26 22 43 77 98 97 100 108 121 132 143 157 160 167 176 183 189 197 202 203 204 207 207 204 204 204 201 199 199 202 205 199 191 187 180 176 172 169 167 156 158 158 140 117 74 35 29 12 23 21 44 75 91 102 101 100 120 136 145 158 160 163 175 184 190 194 199 203 204 208 207 205 206 201 196 198 197 196 200 198 185 181 178 176 172 167 166 157 161 154 141 123 83 45 32 12 26 20 46 64 88 109 108 104 117 136 148 157 160 163 174 184 191 191 195 202 204 205 204 202 202 198 196 197 194 193 194 190 185 181 177 175 173 166 165 159 157 153 137 123 89 47 46 10 18 18 43 58 80 108 108 108 118 132 147 157 164 175 182 187 191 194 200 203 204 204 206 205 202 198 195 193 194 195 196 193 190 187 181 175 171 163 160 160 160 152 133 118 96 47 43 10 14 22 36 55 78 106 109 110 118 133 148 156 165 176 182 186 189 194 199 204 207 206 207 206 203 197 195 194 196 199 202 199 197 196 192 183 170 159 161 163 164 156 133 104 94 60 36 13 19 21 27 58 88 106 109 111 121 140 151 159 167 174 182 186 190 194 196 201 205 207 207 207 200 188 195 200 200 202 204 200 198 196 192 185 174 158 158 163 164 160 143 109 85 64 45 17 24 22 22 65 92 101 103 110 122 141 146 152 160 165 170 178 192 193 196 201 204 201 204 207 192 185 189 198 206 207 205 192 185 186 182 180 178 166 155 159 163 159 150 113 77 68 41 49 15 25 27 71 95 98 100 113 124 123 118 129 130 142 154 162 177 185 189 195 195 189 196 198 177 172 182 188 197 198 185 165 155 165 161 160 169 172 162 154 158 154 151 125 69 61 42 134 14 28 34 79 97 96 100 111 111 78 66 61 49 62 86 112 133 150 168 175 176 175 184 190 166 153 152 163 145 121 99 90 84 95 111 118 153 172 168 155 148 154 153 136 75 58 44 227 58 21 47 88 91 90 88 91 81 78 98 89 82 74 68 64 66 88 120 138 147 162 180 182 151 124 116 124 96 33 24 28 29 38 41 58 100 141 159 159 145 153 158 148 85 55 51 200 137 16 63 91 87 83 75 78 68 76 88 91 134 130 127 121 125 99 83 104 110 147 175 182 142 93 102 91 80 96 117 106 139 174 182 171 153 149 146 145 146 147 159 158 100 57 62 105 97 51 72 84 77 71 76 59 57 55 61 77 137 136 107 102 180 217 126 55 78 134 181 190 154 95 59 80 114 133 116 78 111 193 247 255 247 209 177 129 140 150 159 169 116 52 48 113 89 60 80 87 68 74 72 58 67 51 62 60 103 113 67 122 180 236 182 46 50 118 185 179 105 104 80 98 100 96 119 66 104 190 212 233 183 154 143 108 110 141 157 163 100 64 102 91 82 58 77 57 57 78 69 71 62 46 91 79 51 44 77 140 103 117 122 64 63 151 181 181 127 84 119 111 99 104 131 55 66 106 128 155 107 94 114 119 108 136 136 112 104 76 182 81 65 71 89 79 70 87 79 70 64 67 91 94 76 87 112 115 110 111 79 48 77 177 200 205 161 62 99 133 128 101 99 89 91 93 109 99 90 91 97 113 103 82 103 137 165 82 143 80 68 84 85 87 79 95 97 90 81 74 86 98 101 94 106 117 117 90 62 58 120 187 201 209 190 76 56 110 133 112 105 109 123 121 113 111 113 122 117 123 112 103 152 163 171 114 119 80 64 78 
81 90 85 95 93 95 89 81 84 92 94 90 93 107 110 81 64 72 149 192 206 210 212 115 64 118 138 128 114 102 107 107 111 112 117 126 129 143 121 131 170 163 161 155 150 81 65 80 80 88 89 91 100 98 98 97 102 110 114 114 123 130 109 86 65 86 160 190 207 210 209 167 108 147 145 149 147 137 137 133 133 135 134 130 128 145 110 154 169 161 163 158 144 75 71 85 81 85 94 84 96 109 120 119 119 125 124 130 138 130 102 85 62 117 161 189 207 211 198 192 119 124 158 153 164 163 162 167 165 161 154 146 148 128 120 171 168 162 165 153 140 77 89 87 86 87 92 103 93 101 121 128 132 132 133 136 135 122 107 92 98 127 152 188 206 209 198 189 162 118 153 160 166 174 168 163 171 171 169 164 159 141 161 169 172 167 166 161 154 88 94 86 89 91 101 115 119 112 116 125 132 137 138 139 138 131 108 95 105 128 151 189 202 204 194 185 180 158 144 165 176 182 178 182 185 184 178 178 176 178 177 174 170 168 166 164 178 91 86 87 92 94 105 119 128 125 132 141 141 144 148 153 151 145 120 97 100 122 157 192 202 203 193 185 175 166 154 165 172 184 190 199 199 195 189 184 179 177 177 177 169 169 166 162 181 94 78 85 90 92 106 124 130 128 128 135 144 152 162 166 162 155 116 98 96 122 168 195 207 204 191 183 176 168 152 165 188 193 194 189 186 189 187 184 180 180 177 178 172 164 166 163 179 86 68 81 87 93 104 120 124 125 131 136 147 158 170 172 170 145 101 113 106 130 169 190 205 207 196 184 180 181 160 156 181 198 200 197 195 195 192 181 177 180 178 172 171 167 166 166 164 100 72 76 81 90 104 114 113 115 130 142 155 157 168 171 166 121 95 127 117 131 158 188 204 216 205 187 189 191 177 155 168 182 195 196 195 194 188 183 178 175 173 170 168 170 164 159 137 90 83 72 80 88 103 116 108 111 124 134 144 153 164 166 154 103 89 133 126 125 152 187 207 212 206 194 191 192 185 158 173 167 171 188 193 187 181 178 174 168 170 172 167 169 166 153 139 83 90 73 74 87 97 105 106 103 116 128 143 154 160 156 139 99 83 117 115 117 135 172 185 183 176 181 183 180 162 162 179 165 161 181 187 185 179 175 167 170 169 168 166 167 163 155 174 105 106 77 70 83 94 100 103 99 105 123 138 150 153 146 139 110 75 65 58 68 105 144 154 158 158 158 126 102 135 165 178 170 154 172 185 182 173 174 170 169 168 166 162 164 159 157 172 121 105 70 69 79 93 98 100 98 106 119 136 150 141 132 123 111 94 69 40 26 48 102 121 115 127 115 91 116 148 156 155 161 154 155 176 173 172 171 165 170 170 166 163 166 151 169 187 96 74 60 70 77 87 93 98 98 105 118 136 143 119 124 111 108 109 98 83 69 59 63 67 63 102 134 140 146 143 150 145 148 145 146 166 174 171 165 166 169 171 164 164 163 149 161 199 143 145 184 78 75 89 90 92 97 105 120 138 124 115 128 123 109 112 109 104 103 112 120 115 103 113 160 150 151 150 150 143 148 147 142 164 175 166 163 172 167 166 161 164 156 149 136 155 242 244 239 100 68 94 94 83 90 104 121 133 102 110 120 117 108 115 124 124 128 136 157 151 143 148 161 165 169 168 155 138 145 154 136 153 173 164 167 168 162 167 161 163 153 162 221 191 230 227 240 142 60 92 98 81 78 96 123 127 97 106 113 98 91 103 114 111 106 100 114 148 169 134 116 120 118 129 138 125 124 142 133 147 174 166 161 158 161 163 165 158 148 181 250 248 229 229 238 189 57 80 95 86 76 94 119 123 99 111 108 70 42 51 51 51 60 62 55 63 88 77 74 80 70 67 77 73 92 135 138 153 172 166 152 152 163 158 153 143 138 209 247 240 230 231 232 231 84 64 87 91 79 85 108 126 104 117 107 84 64 48 62 82 90 97 101 95 95 107 119 111 104 101 133 143 113 139 144 162 177 161 152 157 157 151 141 133 144 237 242 242 213 217 219 237 148 48 84 87 76 75 101 130 112 115 121 130 109 95 100 105 123 144 155 151 149 162 150 139 
146 151 166 167 154 145 148 168 174 159 153 160 154 143 138 118 182 245 238 241 158 165 172 178 172 60 64 82 75 69 89 116 118 117 132 142 116 93 94 95 106 115 123 141 139 132 130 137 141 149 161 167 161 154 150 164 166 152 149 152 139 139 128 121 236 253 243 239 140 144 150 145 154 103 45 70 72 64 73 100 107 108 121 129 120 95 87 84 80 86 83 82 84 94 109 126 139 150 159 169 165 157 145 153 149 143 145 143 133 134 118 124 150 188 225 247 144 145 145 148 147 142 63 48 69 68 62 83 92 102 113 119 118 105 98 101 95 106 118 119 112 116 131 144 149 156 162 168 170 155 140 142 138 134 139 133 128 115 117 137 39 11 39 84 141 138 136 142 142 138 125 47 55 67 61 70 81 99 110 109 116 123 119 124 119 122 130 134 140 140 150 160 167 167 165 165 162 157 142 132 128 133 136 129 110 104 141 131 50 33 13 0 136 133 131 137 138 130 140 111 43 55 59 69 78 97 109 104 118 127 141 141 140 146 144 147 152 156 169 175 177 179 168 161 159 158 138 125 125 125 122 112 97 133 148 122 35 32 31 24 132 132 131 139 144 141 137 144 85 34 51 59 72 85 103 97 115 133 148 150 150 147 153 160 168 171 180 177 174 180 166 157 162 147 133 121 115 119 107 86 109 147 144 107 25 27 24 27 127 129 138 140 128 115 89 46 23 37 36 44 59 73 89 97 110 129 139 149 150 152 157 163 169 165 161 172 181 175 161 154 146 136 124 112 103 93 82 94 135 147 142 107 23 25 24 21 119 111 92 64 39 18 3 0 1 37 40 34 43 61 75 91 99 105 122 135 147 158 154 161 165 162 159 169 173 163 157 150 139 123 101 90 82 76 89 118 142 142 135 149 35 21 23 19                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                   1
255 254 254 254 251 249 240 238 236 238 228 203 179 89 57 56 90 129 164 190 207 222 233 244 249 249 249 250 250 247 245 239 225 205 185 147 123 212 201 161 211 226 199 216 224 194 206 216 255 254 253 251 250 240 239 232 232 226 205 174 71 28 78 127 175 211 230 237 241 242 247 250 250 250 250 251 252 250 248 246 239 229 213 203 212 203 123 147 193 208 208 193 221 222 203 200 255 255 253 251 244 238 232 225 223 207 186 59 25 108 190 228 240 242 245 247 247 248 248 250 250 250 249 250 251 250 249 248 245 238 230 238 217 157 116 129 178 196 204 187 183 216 220 204 255 253 251 246 239 235 226 215 207 202 97 32 129 220 242 243 245 246 248 248 249 248 249 250 250 250 249 249 249 248 248 247 246 243 240 229 209 196 165 147 146 193 185 200 168 175 204 221 254 252 248 242 237 231 218 202 213 135 46 132 217 239 242 247 249 249 248 248 249 249 249 249 249 248 249 248 247 248 248 246 244 242 239 235 223 209 195 168 135 172 186 170 178 157 173 198 254 250 245 236 233 228 203 205 188 61 109 214 238 242 246 247 248 247 247 248 248 247 247 247 248 247 247 247 246 246 245 243 242 243 241 240 237 229 211 193 153 141 182 168 170 164 152 159 252 247 240 237 233 210 206 198 124 73 187 236 240 243 244 245 246 246 246 246 246 246 245 245 245 244 243 240 238 237 239 240 241 243 242 242 241 240 235 217 192 131 146 177 169 157 156 141 250 241 237 237 219 196 210 166 83 141 230 238 239 240 241 242 243 243 244 244 244 243 242 242 242 240 238 234 234 236 238 239 240 240 241 244 242 241 239 229 203 162 122 160 182 169 164 152 247 240 237 231 200 199 180 115 104 204 235 239 238 237 237 237 239 240 241 241 240 240 239 239 240 239 237 237 237 239 241 242 242 242 242 243 242 242 241 234 210 181 139 84 131 184 174 157 245 240 234 226 191 190 145 101 168 230 236 236 233 234 234 234 236 237 239 239 238 239 237 239 240 241 241 241 241 243 244 243 244 244 244 243 241 240 237 236 224 201 173 99 49 117 178 183 246 236 236 210 191 159 117 118 210 237 235 233 229 230 234 234 235 237 239 239 238 237 236 235 239 240 241 241 242 242 242 244 246 247 246 246 243 239 235 234 234 214 191 173 103 71 131 155 246 226 231 196 177 111 62 169 234 238 235 231 230 230 233 235 237 238 238 236 236 237 236 235 236 236 236 238 242 242 245 249 247 244 242 246 246 242 236 235 235 231 206 175 142 80 113 129 232 223 219 189 135 66 74 205 234 236 235 233 230 231 235 235 238 239 238 237 234 236 233 232 230 231 232 235 234 231 224 212 204 208 217 222 225 234 241 238 236 237 226 183 141 121 94 140 227 226 207 165 104 52 104 216 231 235 234 232 231 231 234 236 233 231 232 231 230 232 231 228 226 220 220 218 202 184 157 156 179 214 224 228 225 219 226 236 237 235 235 215 145 117 139 124 227 216 198 136 82 54 137 215 228 232 233 232 232 236 236 236 232 222 219 220 223 226 228 226 220 213 206 191 174 162 158 173 196 203 211 212 211 218 223 225 233 236 235 233 195 91 109 152 223 208 177 113 83 76 147 207 229 230 234 242 242 235 229 220 214 209 202 204 211 219 227 227 220 206 192 179 165 159 156 157 180 190 182 186 191 180 196 220 234 237 236 235 230 145 61 106 226 199 155 107 108 93 145 207 225 230 237 221 202 181 171 157 159 174 177 181 194 212 227 231 225 210 190 169 161 142 136 155 167 171 165 142 143 175 165 184 220 234 238 236 239 197 100 77 225 189 153 115 117 98 131 208 223 232 220 209 198 173 155 156 149 146 141 147 183 213 234 241 238 218 192 169 155 143 139 77 55 36 48 78 85 142 187 184 209 234 237 237 240 222 139 100 231 187 150 88 109 86 110 215 223 223 222 207 189 186 185 173 155 137 135 131 156 212 241 246 245 232 205 180 158 117 130 110 79 76 98 109 
125 144 183 208 225 234 237 237 236 230 154 100 245 184 138 71 120 80 114 216 222 219 199 170 174 169 162 159 134 114 112 123 135 197 243 247 246 232 216 205 187 175 190 173 170 172 172 191 213 214 219 229 237 237 238 237 235 235 179 121 253 195 140 90 141 89 102 214 220 202 165 163 137 120 93 68 85 76 98 133 167 195 241 248 246 235 229 220 198 174 158 155 155 158 174 192 214 234 237 239 241 241 240 237 234 236 203 142 255 223 140 122 147 103 83 203 213 181 161 132 75 65 46 46 73 132 169 177 189 207 239 249 247 239 234 233 230 218 199 179 173 180 199 220 234 239 240 241 242 241 238 235 233 232 214 165 255 240 152 123 148 108 81 193 225 174 152 106 40 70 125 134 166 189 175 177 199 220 238 246 248 242 239 238 238 240 241 241 240 241 242 244 244 247 246 244 240 238 236 235 231 231 220 191 254 247 183 110 146 122 78 190 232 185 160 112 120 145 149 156 152 142 163 206 218 227 241 246 249 247 243 242 241 241 243 245 246 247 246 247 248 248 246 243 239 235 233 231 230 230 224 206 252 254 213 123 112 132 89 189 233 207 194 184 198 182 149 140 142 178 221 223 220 232 243 247 248 248 244 240 230 233 241 244 245 247 248 248 247 246 244 242 238 233 231 230 227 228 228 212 251 254 235 147 93 150 128 180 235 224 226 224 210 196 186 195 218 234 235 228 224 235 245 248 249 248 244 242 233 205 215 233 238 241 242 243 243 242 242 239 237 232 228 229 225 225 229 219 252 254 240 183 104 174 132 156 238 230 233 232 230 234 236 238 239 239 237 225 213 235 244 246 247 249 249 245 243 219 183 200 221 230 235 235 236 236 235 233 234 232 229 225 221 224 231 225 254 253 234 216 115 161 146 137 233 232 231 234 237 238 240 242 241 239 234 208 201 241 238 238 237 239 229 227 238 225 180 175 194 212 221 224 226 226 227 228 228 228 229 225 222 226 234 228 255 253 248 253 158 119 160 143 218 231 230 232 235 239 240 241 240 234 226 195 207 218 204 205 201 200 188 164 172 189 203 183 170 189 205 210 215 217 218 221 224 224 226 226 226 231 235 233 254 254 253 255 201 115 126 142 200 225 227 231 234 238 237 235 234 226 210 181 189 164 131 139 135 153 133 87 130 203 219 206 186 173 183 194 200 202 205 208 216 222 225 227 228 234 238 233 255 255 254 254 228 120 101 130 168 222 219 230 234 232 230 228 222 213 192 173 174 136 75 90 98 107 122 173 210 225 226 219 209 190 175 181 191 195 197 196 209 222 226 227 230 236 240 232 254 253 253 251 251 160 119 120 140 214 211 222 225 221 218 214 210 199 181 180 190 178 155 131 110 114 156 207 226 232 232 228 209 199 183 176 186 200 198 185 206 220 226 228 233 236 241 220 254 254 254 252 255 198 135 136 120 190 211 211 212 209 205 201 193 182 180 188 197 201 198 185 175 175 182 223 244 244 242 235 218 198 192 178 194 207 212 197 208 224 229 231 232 234 241 216 255 255 254 253 255 210 162 158 116 158 212 199 202 195 189 188 179 177 175 188 202 211 211 212 210 212 212 199 189 184 198 211 207 179 179 196 209 223 226 222 216 227 230 232 233 234 240 203 255 255 254 251 250 213 182 172 132 137 209 198 193 182 183 173 168 165 168 186 202 215 212 167 127 121 120 117 116 127 120 117 104 45 108 208 220 230 230 233 225 227 230 233 235 231 240 187 255 255 253 244 253 209 183 187 154 135 199 201 186 174 172 166 155 149 163 178 191 177 111 89 126 144 166 221 217 206 179 139 113 139 196 207 226 234 233 234 228 227 230 232 232 230 238 159 254 254 250 236 250 227 183 200 172 145 184 206 188 183 176 174 167 162 178 141 68 44 65 166 209 221 208 217 203 186 199 211 223 245 238 220 226 233 234 233 231 229 228 223 229 225 231 163 254 254 253 233 219 225 206 201 188 172 187 209 194 189 188 193 198 195 194 171 
108 90 128 170 189 198 196 200 224 240 239 237 237 235 235 232 231 233 230 230 231 229 225 224 226 220 224 183 254 253 254 251 212 198 201 196 191 177 148 199 203 194 200 202 207 208 202 217 229 210 214 217 214 219 223 224 223 220 230 234 235 237 236 236 234 229 227 228 228 227 221 219 219 214 215 183 255 255 253 255 238 192 179 176 153 131 92 161 210 196 198 203 212 214 211 216 220 222 211 200 192 182 183 185 198 217 228 233 235 237 239 239 234 231 227 226 221 216 215 212 212 204 205 178 254 255 254 252 247 237 203 173 164 136 101 121 206 198 198 203 209 212 216 222 221 218 207 205 199 195 198 207 212 219 227 234 237 240 243 241 237 232 227 219 214 209 209 205 198 196 194 169 254 254 254 252 248 248 248 208 203 186 154 135 177 201 197 202 202 208 215 221 223 217 214 211 209 210 213 216 220 226 234 237 241 244 243 242 236 227 219 213 210 205 200 190 182 194 191 175 254 254 251 252 239 231 237 217 203 185 168 119 132 199 191 195 196 204 214 223 225 223 220 217 219 224 228 234 237 240 243 244 246 246 243 239 231 212 204 205 199 192 187 173 180 199 150 148 255 249 250 249 220 213 212 194 184 176 150 136 90 151 198 188 191 200 214 221 228 235 234 234 238 241 241 243 245 245 245 247 246 243 239 229 213 199 197 191 182 176 164 162 189 152 115 149 254 247 251 253 240 207 208 197 182 177 153 113 82 100 182 180 182 194 210 221 229 237 242 241 241 241 240 241 243 244 245 244 241 232 222 203 188 184 180 169 161 157 150 172 173 133 148 168 249 245 250 246 249 224 223 205 199 197 137 106 114 105 138 177 172 180 196 213 224 236 240 241 238 239 239 240 239 239 236 231 224 213 189 172 172 172 162 151 147 148 163 178 168 171 172 174 245 237 252 233 251 248 232 231 225 191 143 114 124 145 114 116 168 166 172 184 207 223 228 231 230 231 233 224 217 212 208 202 190 166 151 156 159 160 145 139 144 158 165 174 177 174 175 193 252 226 248 240 230 248 254 252 215 175 138 133 157 154 111 69 94 152 152 156 174 192 196 201 204 199 193 184 181 181 176 163 138 124 146 166 166 147 136 140 154 160 146 143 146 155 155 128     1
196 197 198 199 200 202 198 90 40 21 68 145 150 152 158 169 177 179 179 179 179 179 178 178 180 180 177 178 181 181 179 176 172 167 161 159 159 118 28 32 41 171 215 211 212 211 210 210 197 199 197 201 199 203 173 50 32 25 105 146 150 156 161 170 179 182 182 179 180 179 179 180 181 180 179 180 180 179 178 178 174 167 163 160 157 142 53 31 32 129 222 209 211 210 209 207 197 197 198 201 199 205 112 29 45 49 124 145 152 159 166 175 182 184 183 182 180 179 179 181 183 181 178 179 180 180 183 181 178 171 164 161 156 152 83 27 40 57 199 216 210 209 208 208 195 198 201 200 201 210 78 32 48 60 131 146 152 160 167 175 179 182 183 184 182 182 180 179 180 180 178 180 183 184 184 184 182 177 168 165 157 155 100 30 49 33 143 221 209 209 208 210 199 200 201 201 206 182 42 39 41 68 135 145 154 163 170 178 181 185 184 181 180 184 183 179 177 179 179 182 185 187 186 185 180 176 173 167 159 159 109 33 49 37 105 219 209 209 209 209 199 200 201 202 206 179 53 33 48 76 137 148 159 167 170 174 180 187 187 184 179 179 179 180 179 178 178 182 186 186 183 177 172 173 172 169 163 162 119 36 50 43 73 207 212 209 208 209 196 198 197 196 199 185 56 34 54 75 135 151 162 159 160 166 174 180 180 184 179 178 179 176 176 177 178 180 181 177 173 170 165 152 143 152 167 165 124 43 49 40 56 195 213 207 210 211 192 190 188 187 191 179 57 31 46 68 130 154 133 108 120 140 149 163 169 174 175 178 176 170 172 172 174 175 169 157 142 135 120 111 107 101 145 167 126 47 41 38 45 181 215 207 211 211 183 180 183 187 187 187 69 24 39 68 129 133 98 79 74 84 90 105 122 139 154 164 169 166 172 172 165 162 147 107 71 62 64 69 84 82 103 153 134 50 40 41 40 168 218 209 210 211 180 182 183 185 186 188 71 25 34 74 132 104 80 86 83 66 50 48 62 97 112 128 151 159 168 168 157 131 105 91 66 63 83 102 111 104 99 123 139 57 32 40 42 170 218 210 212 212 181 183 184 182 182 187 84 23 31 89 128 104 123 129 133 122 94 75 68 78 91 111 134 144 153 160 152 122 109 93 95 106 124 137 148 143 127 113 143 76 27 38 44 175 213 208 213 213 179 180 182 183 181 192 105 23 38 119 135 118 136 139 128 112 98 80 78 93 92 94 126 154 157 163 146 115 97 99 78 70 81 97 123 150 156 133 130 101 30 38 55 186 207 205 211 213 179 177 178 179 179 193 131 29 56 118 104 120 148 130 96 73 62 64 56 70 101 106 92 105 95 94 101 95 111 97 72 78 69 68 73 110 141 155 128 89 70 43 66 195 205 204 207 210 178 177 178 179 179 190 144 42 66 92 97 136 130 92 70 95 82 77 94 78 109 131 86 90 141 143 109 84 126 100 96 83 68 92 87 72 113 136 141 122 76 48 88 206 202 204 204 205 176 175 177 178 180 184 160 44 53 124 129 130 125 70 83 109 52 57 93 94 127 128 79 128 187 190 151 74 115 110 98 70 50 84 112 64 95 130 128 117 56 28 119 215 202 203 203 203 176 174 175 178 179 180 172 92 42 110 122 103 110 106 120 120 98 99 101 113 136 133 100 149 182 181 176 111 124 129 112 107 109 124 127 127 129 128 143 149 66 78 146 210 204 205 204 202 176 176 178 178 178 181 164 158 95 132 156 137 110 104 127 139 126 113 112 125 127 110 127 171 178 180 181 157 114 117 116 119 131 135 128 123 125 142 161 154 93 146 174 203 206 207 204 203 175 175 179 177 179 182 142 120 88 138 155 158 157 136 115 113 110 115 125 122 120 136 174 180 171 177 169 173 161 135 130 122 123 131 137 151 166 161 159 162 104 99 128 193 208 203 203 201 172 173 175 175 178 179 136 140 82 141 156 158 165 167 162 160 158 152 148 142 144 154 176 173 173 176 168 167 165 160 156 158 156 165 168 173 179 169 163 164 120 141 111 161 204 202 202 200 173 175 176 176 175 172 142 172 96 136 156 164 169 168 165 166 161 152 152 152 138 150 164 164 174 176 166 163 
148 146 168 168 170 171 173 178 182 173 166 164 130 162 131 139 198 203 201 200 174 175 175 175 177 167 152 146 98 139 152 164 168 166 170 175 176 174 166 128 122 168 160 162 180 180 170 155 161 129 148 171 171 172 170 173 180 173 165 166 116 134 157 143 201 203 202 200 173 175 172 172 178 167 158 127 112 146 148 161 167 165 171 174 173 164 139 99 138 174 163 173 183 179 181 159 169 144 115 155 169 174 171 170 172 173 165 164 126 117 154 158 205 202 202 201 175 176 173 173 174 169 163 145 133 144 147 160 167 167 169 167 163 147 114 100 142 141 131 158 160 159 174 149 146 153 109 137 164 172 168 170 171 168 164 161 136 134 152 166 201 200 202 201 175 175 173 173 175 170 162 166 157 136 145 158 163 165 169 168 157 138 104 124 144 104 82 111 141 143 153 109 107 151 134 127 161 169 169 170 169 166 164 156 151 161 157 177 193 191 192 194 175 175 174 174 175 171 166 163 164 138 140 156 160 163 166 161 150 130 113 146 153 139 132 121 116 125 130 133 137 150 158 134 156 168 167 170 166 163 164 155 158 160 165 178 186 184 184 184 173 172 174 174 176 171 165 159 150 139 134 153 160 160 160 156 140 119 129 150 152 146 145 149 142 142 148 145 142 151 156 148 147 171 166 166 161 160 163 153 151 157 168 173 184 183 183 182 172 172 173 175 176 174 158 145 148 137 130 146 157 159 160 155 134 118 138 142 145 149 154 156 155 155 162 156 147 149 149 159 147 166 169 166 158 156 160 150 146 147 161 177 183 181 181 180 170 173 174 174 173 174 169 142 140 136 130 137 154 158 159 155 126 127 141 135 147 162 172 170 167 165 174 170 161 152 148 161 154 160 176 165 155 157 155 149 145 145 171 183 179 181 181 181 168 169 172 173 174 174 177 175 178 161 127 131 147 152 158 155 126 136 139 138 151 151 146 147 156 159 155 153 156 162 151 155 160 155 175 164 155 154 150 153 186 186 185 181 180 181 181 181 166 169 173 174 173 174 176 178 183 166 129 129 143 151 158 156 129 139 139 139 121 111 118 130 135 136 139 123 119 138 157 153 162 155 172 163 156 151 146 155 187 187 181 179 179 181 182 179 167 170 173 174 174 174 176 176 180 171 131 133 140 147 156 153 130 138 137 92 92 153 175 192 198 195 195 155 130 93 125 154 154 160 169 162 153 151 149 158 188 187 182 181 179 180 181 179 169 173 175 174 172 175 176 175 181 178 135 133 137 143 151 151 131 138 105 13 42 120 136 152 145 146 146 106 55 29 60 144 152 162 169 161 152 152 149 161 190 187 184 182 182 181 183 183 172 175 172 171 173 175 176 175 179 182 140 131 138 144 148 155 136 128 62 0 64 118 137 136 134 140 133 127 103 46 26 103 150 160 169 161 154 155 146 169 198 189 185 183 183 183 189 190 170 171 170 171 174 177 177 178 178 184 147 128 138 142 147 159 138 116 77 12 101 132 129 130 138 131 128 134 118 58 27 88 144 157 167 160 155 154 143 180 201 189 183 181 181 185 189 188 170 170 171 174 177 177 176 179 181 185 158 127 137 140 146 161 130 107 124 36 86 136 126 122 142 133 116 142 119 40 58 122 136 151 167 160 154 152 146 189 200 191 184 180 180 184 191 190 170 170 173 176 179 176 176 179 183 184 174 129 131 136 140 154 121 115 139 95 55 128 123 116 131 121 116 128 119 85 98 149 135 146 167 158 151 144 153 197 197 190 185 181 179 182 191 192 170 172 175 177 176 176 176 181 184 184 182 135 128 137 140 153 121 126 139 121 87 87 109 117 128 119 106 123 143 113 134 156 149 144 165 156 152 139 164 202 197 190 186 182 178 181 188 191 170 173 175 176 176 176 178 180 181 184 184 146 126 140 140 151 127 132 145 119 122 126 107 113 99 107 102 124 118 112 140 163 157 145 162 152 148 138 172 196 196 191 188 184 180 182 187 188 172 174 176 176 176 178 181 181 179 174 173 153 127 
135 140 147 138 137 144 135 132 163 158 127 110 119 129 140 136 133 152 166 153 148 162 152 143 143 180 186 187 186 189 186 179 182 185 187 173 174 174 173 175 179 179 180 172 162 169 164 132 130 136 145 145 139 140 140 138 144 154 152 157 156 153 151 147 150 154 162 151 156 158 151 140 154 188 186 184 184 187 183 178 176 183 188 171 171 172 174 177 172 164 165 164 165 166 173 145 127 134 143 147 143 137 132 132 134 125 119 119 125 130 138 144 148 154 157 153 155 152 147 138 164 189 184 184 183 181 183 183 176 178 183 170 168 166 164 164 160 160 163 166 166 165 186 165 128 134 139 150 149 145 134 133 135 131 133 140 136 135 139 143 151 153 156 157 153 145 143 136 157 192 183 185 181 180 183 182 180 176 174 157 154 152 156 158 159 160 161 163 169 178 192 181 129 127 134 140 149 151 152 146 143 146 157 160 156 153 151 154 155 158 157 153 141 140 136 121 130 205 182 181 180 180 183 182 181 175 176 149 148 152 156 158 158 156 161 174 188 167 190 204 131 108 127 131 142 144 152 155 144 147 151 148 152 150 152 154 160 160 155 143 136 131 131 92 127 220 194 185 187 189 183 179 181 178 176 149 148 149 151 154 159 185 200 201 186 155 200 214 166 82 117 128 129 138 148 151 145 145 148 150 152 153 148 146 155 151 140 133 129 122 125 68 141 219 208 194 209 218 206 194 184 177 172 148 146 151 168 177 197 218 215 208 177 169 207 208 214 102 81 117 119 131 139 142 143 141 144 146 144 144 142 147 152 143 132 128 114 126 106 73 158 214 213 191 196 219 215 217 210 196 180 139 158 187 195 206 213 213 213 203 181 187 207 208 216 178 61 95 104 113 127 136 133 131 133 138 137 136 142 144 136 130 125 119 127 131 90 98 164 214 215 205 191 205 202 203 211 203 204 162 190 191 201 207 217 219 207 202 188 194 208 210 210 213 115 65 102 102 109 121 128 126 125 125 127 121 126 128 124 120 117 129 137 125 82 110 167 214 215 211 203 197 196 204 207 197 192                                                                                                                                                                             1
30 28 28 29 31 30 42 68 79 81 77 67 67 71 63 61 78 108 142 147 123 113 111 107 113 125 136 145 137 118 101 85 64 59 71 83 85 81 96 109 102 94 84 80 71 60 50 44 30 27 28 29 32 30 51 72 73 76 68 63 58 49 39 38 58 73 71 105 124 112 104 108 113 124 132 141 131 115 94 77 66 79 90 88 96 113 120 111 91 78 76 75 72 64 52 40 31 28 30 33 33 32 63 78 77 80 68 38 33 38 83 39 61 75 39 47 98 118 102 102 116 126 139 147 129 106 89 76 81 92 87 95 102 93 82 66 56 53 59 63 65 61 54 37 30 30 32 38 33 35 75 84 95 98 85 84 75 66 75 56 48 34 54 70 70 117 106 98 117 126 142 146 128 101 82 73 78 92 96 83 62 48 49 51 46 50 48 46 56 57 56 36 31 30 31 36 32 45 83 90 106 112 108 108 121 126 102 98 106 96 98 92 56 104 107 104 113 132 146 150 128 103 82 67 94 101 77 46 32 64 93 30 33 36 48 47 46 51 55 38 30 28 34 45 39 58 84 95 113 121 128 117 111 118 119 122 112 120 123 117 98 110 119 112 120 137 153 150 122 99 75 72 108 88 56 85 49 37 49 31 56 42 31 42 42 50 56 37 31 29 32 54 49 67 87 100 118 127 140 148 141 126 112 105 103 101 105 115 132 132 125 120 131 148 162 158 127 96 71 90 105 70 78 100 65 47 44 41 64 49 36 37 46 52 59 36 31 29 30 56 64 72 90 106 119 131 143 154 157 151 145 140 140 141 141 145 135 126 124 125 138 152 160 156 133 100 87 101 105 95 95 96 88 78 61 54 54 65 64 55 55 63 63 35 32 29 27 52 82 76 93 109 120 132 144 158 160 158 157 157 157 160 161 155 141 131 127 132 141 151 160 152 132 108 102 114 110 104 87 78 75 74 69 71 75 70 71 79 75 70 65 37 31 31 27 45 84 78 97 108 115 131 145 156 163 167 166 165 162 163 162 154 141 136 133 137 147 154 159 153 133 115 107 124 122 122 114 104 87 77 76 76 77 85 90 87 79 70 63 36 32 31 28 40 63 76 98 112 122 132 147 157 160 169 171 171 172 169 161 148 140 136 135 137 152 158 162 154 133 121 117 126 134 137 140 138 124 119 120 119 124 124 111 98 84 73 62 38 31 30 30 36 52 69 100 114 130 138 155 165 170 177 180 179 176 170 159 148 140 139 144 140 145 158 160 155 140 128 120 122 136 148 153 152 145 140 142 142 144 134 117 102 87 74 59 46 30 30 30 49 61 69 99 110 127 144 157 165 177 184 184 179 173 170 164 150 137 136 149 158 152 156 159 155 146 131 121 122 134 146 161 163 160 155 151 147 145 128 109 98 83 72 65 40 32 27 41 162 141 77 99 112 123 146 157 168 175 180 183 180 177 170 153 140 136 137 156 166 165 159 159 159 150 127 118 126 138 144 159 169 169 163 164 157 141 125 106 90 83 76 60 31 31 29 31 74 78 69 101 109 116 143 156 167 169 171 175 175 171 156 155 157 144 148 165 170 169 166 164 163 155 136 119 122 139 153 156 168 175 171 167 157 139 120 105 91 84 77 44 33 31 29 29 27 35 64 99 109 110 136 151 156 163 167 170 167 145 147 179 173 156 158 175 178 177 176 173 169 160 146 131 126 125 150 159 168 175 170 163 157 141 117 95 90 87 70 35 37 30 28 29 33 25 61 101 112 113 133 147 152 159 161 163 154 115 108 129 171 169 162 176 186 189 189 187 181 166 153 146 154 154 140 155 166 174 168 161 153 138 118 101 92 88 59 35 33 31 29 28 31 28 58 101 112 119 129 142 146 147 152 148 131 105 83 73 90 126 137 147 168 177 180 181 179 163 149 157 165 150 126 141 162 168 165 158 141 127 114 106 92 79 46 39 36 33 30 28 32 27 53 98 109 114 122 134 135 137 144 131 107 114 108 94 79 71 97 121 139 156 156 151 138 124 122 121 96 77 96 131 149 155 155 148 136 113 109 101 86 87 55 40 39 33 31 29 31 27 46 94 107 111 118 125 128 131 127 105 108 135 141 130 116 96 81 97 121 130 126 112 91 71 58 51 53 69 99 127 138 143 144 140 130 115 108 97 78 106 82 32 36 34 31 27 31 29 37 86 106 112 116 119 120 118 102 94 123 143 149 146 139 124 104 81 80 84 82 75 62 64 73 84 94 107 114 114 128 135 133 
127 119 114 100 91 79 123 51 34 34 33 30 27 29 32 31 76 102 109 116 120 117 109 98 117 135 144 152 153 151 146 137 120 85 70 70 78 100 110 120 124 129 125 107 108 118 123 121 117 108 104 92 80 74 97 45 35 35 32 30 27 29 33 27 63 99 106 113 117 121 110 105 117 136 144 155 156 154 154 160 166 158 143 139 147 146 136 134 133 125 114 94 89 104 113 113 108 96 90 83 77 57 27 34 36 35 33 30 26 28 29 28 44 90 104 114 121 123 115 89 92 126 143 158 159 157 155 161 171 173 166 166 166 152 139 133 126 118 107 89 65 81 102 101 97 88 81 78 70 40 36 36 35 33 32 30 28 29 30 29 30 77 98 110 121 127 114 102 79 61 101 122 135 143 153 158 168 179 173 170 164 142 130 128 124 117 103 87 72 65 88 93 89 80 77 74 56 32 37 36 34 32 35 32 27 27 28 29 26 53 92 106 115 128 118 120 139 99 113 158 122 121 117 126 138 156 157 148 146 127 116 118 115 113 94 77 72 72 82 84 77 78 77 68 40 28 33 34 35 32 34 32 29 29 26 29 28 32 77 101 112 122 125 132 148 149 143 189 199 195 153 156 135 104 103 111 93 97 88 84 73 60 51 56 66 84 89 84 74 74 76 55 29 32 31 32 35 32 33 32 27 29 28 28 29 27 51 90 104 115 124 141 151 147 149 163 181 210 218 222 208 169 180 185 150 161 143 111 68 68 81 94 79 96 94 82 80 75 66 34 30 31 30 35 34 34 33 32 27 29 27 27 28 28 36 78 97 105 120 136 148 150 151 156 158 166 189 215 219 218 223 216 209 190 161 123 100 106 122 120 94 103 95 84 77 72 42 25 32 30 30 38 34 32 32 30 27 27 29 28 28 27 38 65 86 99 110 125 137 146 147 152 165 170 158 157 167 171 178 177 156 142 133 121 105 111 128 122 101 99 86 81 72 49 28 27 36 34 31 39 34 29 30 29 28 27 29 27 28 28 42 68 77 93 102 117 126 134 140 138 132 157 182 177 165 160 165 170 166 154 137 118 113 114 124 118 100 83 76 69 48 28 27 27 33 35 36 40 32 29 29 27 28 29 29 28 29 27 49 76 76 86 102 113 125 122 127 137 127 117 130 155 168 168 170 162 144 130 121 117 108 109 113 107 85 69 62 47 28 27 28 27 33 35 40 41 34 31 29 28 28 27 28 27 28 39 63 81 84 79 95 115 129 125 110 117 121 114 104 98 102 109 109 101 102 111 111 101 91 99 99 84 68 56 41 29 26 27 29 28 34 34 37 39 33 32 29 28 27 27 28 32 44 56 71 86 93 80 82 109 131 140 120 106 96 102 105 98 95 98 97 98 100 94 83 80 87 91 79 69 53 36 30 28 26 28 31 29 31 33 37 39 32 31 28 26 30 35 46 57 62 67 78 90 97 92 77 96 124 143 145 131 113 97 89 82 80 78 82 81 79 73 71 89 96 82 68 54 35 26 29 28 26 28 31 28 29 32 37 38 32 32 39 47 56 65 74 74 72 76 86 92 99 100 85 76 110 136 146 152 148 139 125 114 105 97 96 91 90 91 103 110 92 72 53 30 25 29 30 27 27 27 31 27 30 34 38 39 32 33 63 72 81 89 85 81 80 87 91 97 98 98 92 80 90 119 135 143 147 152 151 144 143 137 135 135 131 126 125 108 77 51 28 26 26 31 29 26 27 27 31 27 28 35 37 35 31 31 78 86 95 97 91 84 88 94 97 103 101 101 93 89 82 92 119 129 136 143 143 140 143 144 145 144 137 128 110 84 52 26 27 27 27 33 27 27 27 28 32 28 29 36 35 35 32 31 86 95 101 99 94 92 95 97 100 109 102 104 99 92 89 77 85 104 112 119 121 121 129 129 128 124 112 100 76 53 32 25 26 27 30 34 27 27 28 31 30 27 32 37 35 37 33 33 95 103 101 97 98 95 99 104 104 110 108 106 102 92 89 83 66 69 76 85 86 92 100 99 98 91 80 66 47 35 31 25 26 29 35 33 26 27 28 30 30 28 33 40 35 34 33 34 98 104 100 98 102 98 104 112 109 111 111 107 103 97 86 82 77 65 53 56 66 76 72 66 58 50 45 39 36 37 32 26 26 30 36 31 27 28 26 27 28 27 33 39 37 36 31 31 99 99 97 103 106 107 113 114 113 114 116 114 108 101 93 83 78 70 60 51 46 45 42 39 39 42 40 39 40 41 35 27 26 32 36 29 27 29 27 28 29 27 33 39 37 35 31 31 101 98 101 107 105 110 116 115 117 114 114 114 107 102 96 88 79 71 60 54 49 45 45 45 44 45 43 43 45 44 35 27 26 32 34 28 
26 26 27 27 28 26 32 37 34 35 30 30 103 102 106 109 105 114 117 121 122 119 111 110 104 102 99 91 84 77 66 58 54 50 51 53 52 50 49 47 49 44 37 29 26 34 34 27 26 28 29 28 28 26 31 37 34 35 30 30 104 107 107 111 110 114 118 124 128 126 116 115 107 99 96 95 89 82 76 70 64 62 61 60 58 52 52 52 52 50 40 32 26 35 32 28 26 27 30 27 27 27 29 35 35 34 30 31 104 109 110 108 112 114 117 126 132 133 119 117 116 97 92 89 85 84 80 78 72 71 74 67 56 57 60 57 59 55 44 35 28 38 31 28 26 26 30 27 27 26 26 34 35 35 30 30 102 105 108 107 108 113 116 124 132 136 124 119 117 103 96 91 89 84 82 79 81 81 78 65 61 68 68 62 63 56 45 37 30 38 32 27 27 27 32 27 27 26 28 35 35 35 31 29 93 96 100 101 104 109 117 120 134 137 129 123 122 106 97 92 91 90 87 84 89 91 79 64 70 74 73 69 65 56 47 36 30 35 33 29 28 29 30 27 27 26 28 35 35 35 30 28                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                            1
Name: pixels, Length: 34034, dtype: int64

Data Exploration

In [15]:
df_explore = df.copy()
df_explore.head()
Out[15]:
emotion pixels Usage
0 0 70 80 82 72 58 58 60 63 54 58 60 48 89 115 121... Training
1 0 151 150 147 155 148 133 111 140 170 174 182 15... Training
2 2 231 212 156 164 174 138 161 173 182 200 106 38... Training
3 4 24 32 36 30 32 23 19 20 30 41 21 22 32 34 21 1... Training
4 6 4 0 0 0 0 0 0 0 0 0 0 0 3 15 23 28 48 50 58 84... Training
In [16]:
decoded_emotions = {0: 'Angry',
                    1: 'Disgusted',
                    2: 'Scared',
                    3: 'Happy',
                    4: 'Sad',
                    5: 'Surprised',
                    6: 'Neutral'}

print("Distribution of samples per emotion:\n{}".format(df_explore.groupby("emotion").size()))
Distribution of samples per emotion:
emotion
0    4953
1     547
2    5121
3    8989
4    6077
5    4002
6    6198
dtype: int64
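
The distribution is heavily imbalanced: 'Disgusted' (1) has only 547 samples, compared with 8,989 for 'Happy' (3). This is the imbalance that the oversampling step corrects before training.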
In [21]:
df_explore['emotion'].isna().sum()
Out[21]:
0
In [22]:
df_explore['pixels'].isna().sum()
Out[22]:
0
In [23]:
df_explore['Usage'].isna().sum()
Out[23]:
0
In [24]:
df_explore.hist(bins=30, figsize=(10, 5), legend=True, color = (0.5,0.8,0.2,1))
plt.title('Visualisation of samples per emotion')
plt.xlabel('emotion')
plt.ylabel('count')
plt.xticks(np.array(list(decoded_emotions.keys())), decoded_emotions.values())
plt.tight_layout()
plt.show()
In [25]:
plot_all_emotions()
In [18]:
# plot_one_emotion(emotion=1, location=1) # specific location
emotion = 3 # select an emotion key
plot_one_emotion(emotion=emotion, location=random.randint(1, 100)) # random location
Out[18]:
<matplotlib.image.AxesImage at 0x7f557488b250>

Preparing the Data for Training

In [17]:
# Split: ~72% training / 20% testing / 8% validation
# (80/20 train/test split, then 10% of the training set held out for validation)
x_train, x_test, y_train, y_test = load_fer_dataset(df) # splitting the data into train/test
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=45) # creating a validation set

num_classes = len(decoded_emotions)
y_train, y_test, y_val = preprocess_data(y_train, y_test, y_val, num_classes=num_classes) # preprocessing labels
In [18]:
len(df)
Out[18]:
35887
In [19]:
len(x_train) + len(x_test) + len(x_val) # after oversampling
Out[19]:
62923
In [20]:
len(x_train), len(x_test), len(x_val)
Out[20]:
(45304, 12585, 5034)
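
These counts match the intended proportions: 45,304 / 62,923 ≈ 72% training, 12,585 ≈ 20% testing and 5,034 ≈ 8% validation.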

Initial Models for Training

CNN Model 1

In [40]:
def model_1():
  model = models.Sequential()

  # 1st convolutional layer
  model.add(layers.Conv2D(32, kernel_size=3, strides=1, padding="valid", activation='relu', input_shape=(x_train.shape[1:])))

  # 2nd convolutional layer
  model.add(layers.Conv2D(64, kernel_size=3, strides=1, padding="same", activation='relu'))
  model.add(layers.MaxPooling2D((2, 2)))

  # 3rd convolutional layer
  model.add(layers.Conv2D(64, kernel_size=3, strides=1, padding="valid", activation='relu'))

  # 4th convolutional layer
  model.add(layers.Conv2D(128, kernel_size=3, strides=1, padding="same", activation='relu'))
  model.add(layers.MaxPooling2D((2, 2)))

  # 5th convolutional layer
  model.add(layers.Conv2D(128, kernel_size=3, strides=1, padding="valid", activation='relu'))
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Flatten())

  # Dense layers
  model.add(layers.Dense(256, activation='relu'))
  model.add(layers.Dense(7, activation='softmax'))

  # compile the model
  model.compile(optimizer=optimizers.Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
  return model
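
The train_model helper used below is defined earlier in the notebook. For reference, here is a minimal sketch consistent with the behaviour visible in the training logs — checkpointing on val_loss, ReduceLROnPlateau with a factor of sqrt(0.1), and early stopping that restores the best weights. The monitored metric for early stopping and the patience values are assumptions inferred from the logs, not the exact helper:

def train_model(model, epochs, batch_size):
  # NOTE: reconstruction sketch — callbacks inferred from the logs below.
  callbacks = [
      ModelCheckpoint('best-model.h5', monitor='val_loss', save_best_only=True, verbose=1),
      ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.1), patience=3, verbose=1),  # 1e-4 -> 3.16e-5 steps
      EarlyStopping(monitor='val_accuracy', patience=3, restore_best_weights=True, verbose=1),
  ]
  return model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size,
                   validation_data=(x_val, y_val), callbacks=callbacks)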
In [41]:
model = model_1()
plot_model(model, to_file="model.png", show_shapes=True, show_layer_names=True)
Out[41]:
In [42]:
history1 = train_model(model, 30, 64)
model.evaluate(x_test, y_test)
Epoch 1/30
708/708 [==============================] - ETA: 0s - loss: 1.7275 - accuracy: 0.3194
Epoch 1: val_loss improved from inf to 1.53934, saving model to best-model.h5
708/708 [==============================] - 14s 17ms/step - loss: 1.7275 - accuracy: 0.3194 - val_loss: 1.5393 - val_accuracy: 0.4182 - lr: 1.0000e-04
Epoch 2/30
705/708 [============================>.] - ETA: 0s - loss: 1.4399 - accuracy: 0.4635
Epoch 2: val_loss improved from 1.53934 to 1.35400, saving model to best-model.h5
708/708 [==============================] - 11s 16ms/step - loss: 1.4396 - accuracy: 0.4635 - val_loss: 1.3540 - val_accuracy: 0.4960 - lr: 1.0000e-04
Epoch 3/30
708/708 [==============================] - ETA: 0s - loss: 1.2667 - accuracy: 0.5315
Epoch 3: val_loss improved from 1.35400 to 1.23979, saving model to best-model.h5
708/708 [==============================] - 11s 16ms/step - loss: 1.2667 - accuracy: 0.5315 - val_loss: 1.2398 - val_accuracy: 0.5465 - lr: 1.0000e-04
Epoch 4/30
705/708 [============================>.] - ETA: 0s - loss: 1.1215 - accuracy: 0.5877
Epoch 4: val_loss improved from 1.23979 to 1.14390, saving model to best-model.h5
708/708 [==============================] - 11s 16ms/step - loss: 1.1215 - accuracy: 0.5878 - val_loss: 1.1439 - val_accuracy: 0.5691 - lr: 1.0000e-04
Epoch 5/30
705/708 [============================>.] - ETA: 0s - loss: 1.0004 - accuracy: 0.6335
Epoch 5: val_loss improved from 1.14390 to 1.06031, saving model to best-model.h5
708/708 [==============================] - 11s 16ms/step - loss: 1.0004 - accuracy: 0.6337 - val_loss: 1.0603 - val_accuracy: 0.6047 - lr: 1.0000e-04
Epoch 6/30
706/708 [============================>.] - ETA: 0s - loss: 0.8995 - accuracy: 0.6750
Epoch 6: val_loss improved from 1.06031 to 1.01505, saving model to best-model.h5
708/708 [==============================] - 12s 16ms/step - loss: 0.8993 - accuracy: 0.6752 - val_loss: 1.0151 - val_accuracy: 0.6317 - lr: 1.0000e-04
Epoch 7/30
705/708 [============================>.] - ETA: 0s - loss: 0.8081 - accuracy: 0.7098
Epoch 7: val_loss improved from 1.01505 to 0.96490, saving model to best-model.h5
708/708 [==============================] - 11s 16ms/step - loss: 0.8081 - accuracy: 0.7099 - val_loss: 0.9649 - val_accuracy: 0.6599 - lr: 1.0000e-04
Epoch 8/30
705/708 [============================>.] - ETA: 0s - loss: 0.7232 - accuracy: 0.7448
Epoch 8: val_loss improved from 0.96490 to 0.94204, saving model to best-model.h5
708/708 [==============================] - 11s 16ms/step - loss: 0.7231 - accuracy: 0.7448 - val_loss: 0.9420 - val_accuracy: 0.6762 - lr: 1.0000e-04
Epoch 9/30
708/708 [==============================] - ETA: 0s - loss: 0.6458 - accuracy: 0.7729
Epoch 9: val_loss did not improve from 0.94204
708/708 [==============================] - 11s 16ms/step - loss: 0.6458 - accuracy: 0.7729 - val_loss: 0.9766 - val_accuracy: 0.6754 - lr: 1.0000e-04
Epoch 10/30
705/708 [============================>.] - ETA: 0s - loss: 0.5687 - accuracy: 0.8032
Epoch 10: val_loss improved from 0.94204 to 0.92767, saving model to best-model.h5
708/708 [==============================] - 11s 16ms/step - loss: 0.5687 - accuracy: 0.8033 - val_loss: 0.9277 - val_accuracy: 0.6935 - lr: 1.0000e-04
Epoch 11/30
708/708 [==============================] - ETA: 0s - loss: 0.5030 - accuracy: 0.8286
Epoch 11: val_loss did not improve from 0.92767
708/708 [==============================] - 11s 16ms/step - loss: 0.5030 - accuracy: 0.8286 - val_loss: 0.9493 - val_accuracy: 0.7000 - lr: 1.0000e-04
Epoch 12/30
705/708 [============================>.] - ETA: 0s - loss: 0.4361 - accuracy: 0.8530
Epoch 12: val_loss improved from 0.92767 to 0.91674, saving model to best-model.h5
708/708 [==============================] - 12s 16ms/step - loss: 0.4360 - accuracy: 0.8531 - val_loss: 0.9167 - val_accuracy: 0.7275 - lr: 1.0000e-04
Epoch 13/30
705/708 [============================>.] - ETA: 0s - loss: 0.3751 - accuracy: 0.8756
Epoch 13: val_loss did not improve from 0.91674
708/708 [==============================] - 11s 16ms/step - loss: 0.3749 - accuracy: 0.8757 - val_loss: 0.9382 - val_accuracy: 0.7372 - lr: 1.0000e-04
Epoch 14/30
706/708 [============================>.] - ETA: 0s - loss: 0.3237 - accuracy: 0.8936
Epoch 14: val_loss did not improve from 0.91674
708/708 [==============================] - 12s 16ms/step - loss: 0.3238 - accuracy: 0.8936 - val_loss: 0.9720 - val_accuracy: 0.7418 - lr: 1.0000e-04
Epoch 15/30
705/708 [============================>.] - ETA: 0s - loss: 0.2755 - accuracy: 0.9089
Epoch 15: val_loss did not improve from 0.91674

Epoch 15: ReduceLROnPlateau reducing learning rate to 3.1622775802825264e-05.
708/708 [==============================] - 12s 16ms/step - loss: 0.2758 - accuracy: 0.9088 - val_loss: 1.0477 - val_accuracy: 0.7390 - lr: 1.0000e-04
Epoch 16/30
708/708 [==============================] - ETA: 0s - loss: 0.1583 - accuracy: 0.9560
Epoch 16: val_loss did not improve from 0.91674
708/708 [==============================] - 12s 16ms/step - loss: 0.1583 - accuracy: 0.9560 - val_loss: 1.0132 - val_accuracy: 0.7680 - lr: 3.1623e-05
Epoch 17/30
705/708 [============================>.] - ETA: 0s - loss: 0.1326 - accuracy: 0.9642
Epoch 17: val_loss did not improve from 0.91674
708/708 [==============================] - 12s 16ms/step - loss: 0.1325 - accuracy: 0.9642 - val_loss: 1.0862 - val_accuracy: 0.7721 - lr: 3.1623e-05
Epoch 18/30
705/708 [============================>.] - ETA: 0s - loss: 0.1169 - accuracy: 0.9687
Epoch 18: val_loss did not improve from 0.91674

Epoch 18: ReduceLROnPlateau reducing learning rate to 9.999999259090306e-06.
708/708 [==============================] - 12s 16ms/step - loss: 0.1168 - accuracy: 0.9687 - val_loss: 1.1077 - val_accuracy: 0.7769 - lr: 3.1623e-05
Epoch 19/30
705/708 [============================>.] - ETA: 0s - loss: 0.0834 - accuracy: 0.9814
Epoch 19: val_loss did not improve from 0.91674
708/708 [==============================] - 12s 16ms/step - loss: 0.0835 - accuracy: 0.9813 - val_loss: 1.1352 - val_accuracy: 0.7827 - lr: 1.0000e-05
Epoch 20/30
708/708 [==============================] - ETA: 0s - loss: 0.0760 - accuracy: 0.9839
Epoch 20: val_loss did not improve from 0.91674
708/708 [==============================] - 12s 16ms/step - loss: 0.0760 - accuracy: 0.9839 - val_loss: 1.1543 - val_accuracy: 0.7835 - lr: 1.0000e-05
Epoch 21/30
705/708 [============================>.] - ETA: 0s - loss: 0.0712 - accuracy: 0.9850
Epoch 21: val_loss did not improve from 0.91674

Epoch 21: ReduceLROnPlateau reducing learning rate to 3.162277292675049e-06.
708/708 [==============================] - 12s 16ms/step - loss: 0.0711 - accuracy: 0.9850 - val_loss: 1.1847 - val_accuracy: 0.7839 - lr: 1.0000e-05
Epoch 22/30
705/708 [============================>.] - ETA: 0s - loss: 0.0613 - accuracy: 0.9885
Epoch 22: val_loss did not improve from 0.91674
708/708 [==============================] - 12s 16ms/step - loss: 0.0612 - accuracy: 0.9885 - val_loss: 1.1821 - val_accuracy: 0.7872 - lr: 3.1623e-06
Epoch 23/30
708/708 [==============================] - ETA: 0s - loss: 0.0591 - accuracy: 0.9893
Epoch 23: val_loss did not improve from 0.91674
708/708 [==============================] - 12s 16ms/step - loss: 0.0591 - accuracy: 0.9893 - val_loss: 1.1973 - val_accuracy: 0.7878 - lr: 3.1623e-06
Epoch 24/30
705/708 [============================>.] - ETA: 0s - loss: 0.0571 - accuracy: 0.9895
Epoch 24: val_loss did not improve from 0.91674

Epoch 24: ReduceLROnPlateau reducing learning rate to 9.999999115286567e-07.
708/708 [==============================] - 12s 16ms/step - loss: 0.0572 - accuracy: 0.9895 - val_loss: 1.2062 - val_accuracy: 0.7886 - lr: 3.1623e-06
Epoch 25/30
705/708 [============================>.] - ETA: 0s - loss: 0.0541 - accuracy: 0.9907
Epoch 25: val_loss did not improve from 0.91674
708/708 [==============================] - 12s 16ms/step - loss: 0.0540 - accuracy: 0.9907 - val_loss: 1.2078 - val_accuracy: 0.7890 - lr: 1.0000e-06
Epoch 26/30
705/708 [============================>.] - ETA: 0s - loss: 0.0533 - accuracy: 0.9909
Epoch 26: val_loss did not improve from 0.91674
708/708 [==============================] - 12s 16ms/step - loss: 0.0533 - accuracy: 0.9909 - val_loss: 1.2100 - val_accuracy: 0.7896 - lr: 1.0000e-06
Epoch 27/30
708/708 [==============================] - ETA: 0s - loss: 0.0528 - accuracy: 0.9912
Epoch 27: val_loss did not improve from 0.91674

Epoch 27: ReduceLROnPlateau reducing learning rate to 3.1622772926750485e-07.
708/708 [==============================] - 12s 16ms/step - loss: 0.0528 - accuracy: 0.9912 - val_loss: 1.2148 - val_accuracy: 0.7878 - lr: 1.0000e-06
Epoch 28/30
705/708 [============================>.] - ETA: 0s - loss: 0.0518 - accuracy: 0.9914
Epoch 28: val_loss did not improve from 0.91674
708/708 [==============================] - 12s 16ms/step - loss: 0.0517 - accuracy: 0.9915 - val_loss: 1.2148 - val_accuracy: 0.7904 - lr: 3.1623e-07
Epoch 29/30
705/708 [============================>.] - ETA: 0s - loss: 0.0515 - accuracy: 0.9914
Epoch 29: val_loss did not improve from 0.91674
708/708 [==============================] - 12s 16ms/step - loss: 0.0515 - accuracy: 0.9914 - val_loss: 1.2161 - val_accuracy: 0.7898 - lr: 3.1623e-07
Epoch 30/30
708/708 [==============================] - ETA: 0s - loss: 0.0513 - accuracy: 0.9916
Epoch 30: val_loss did not improve from 0.91674

Epoch 30: ReduceLROnPlateau reducing learning rate to 9.99999875577722e-08.
708/708 [==============================] - 12s 16ms/step - loss: 0.0513 - accuracy: 0.9916 - val_loss: 1.2175 - val_accuracy: 0.7902 - lr: 3.1623e-07
394/394 [==============================] - 2s 4ms/step - loss: 1.1413 - accuracy: 0.8024
Out[42]:
[1.1412922143936157, 0.8023837804794312]
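
At 80.24% test accuracy, Model 1 performs reasonably, but the logs show heavy overfitting: training accuracy reaches 99.16% while validation loss stops improving after epoch 12 (0.9167) and climbs to about 1.22 by epoch 30.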

CNN Model 2

In [ ]:
def model_2():
  model = models.Sequential()

  # 1st convolutional layer
  model.add(layers.Conv2D(64, kernel_size=3, strides=1, padding='same', activation='relu', input_shape=(x_train.shape[1:])))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.25))

  # 2nd convolutional layer
  model.add(layers.Conv2D(128, kernel_size=5, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.25))

  # 3rd convolutional layer
  model.add(layers.Conv2D(512, kernel_size=3, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.25))

  # 4th convolutional layer
  model.add(layers.Conv2D(512, kernel_size=3, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.25))
  model.add(layers.Flatten())

  # 1st fully connected dense layer
  model.add(layers.Dense(256, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.Dropout(0.25))

  # 2nd fully connected dense layer
  model.add(layers.Dense(512, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.Dropout(0.25))

  # final dense layer with number of classes
  model.add(layers.Dense(7, activation='softmax'))

  # compile the model
  model.compile(optimizer=optimizers.Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
  return model
In [ ]:
model = model_2()
plot_model(model, to_file="model.png", show_shapes=True, show_layer_names=True)
Out[ ]:
In [ ]:
history2 = train_model(model, 30, 64)
model.evaluate(x_test, y_test)
Epoch 1/30
708/708 [==============================] - ETA: 0s - loss: 2.2622 - accuracy: 0.2163
Epoch 1: val_loss improved from inf to 2.00889, saving model to best-model.h5
708/708 [==============================] - 31s 28ms/step - loss: 2.2622 - accuracy: 0.2163 - val_loss: 2.0089 - val_accuracy: 0.2437 - lr: 1.0000e-04
Epoch 2/30
707/708 [============================>.] - ETA: 0s - loss: 1.9367 - accuracy: 0.2967
Epoch 2: val_loss improved from 2.00889 to 1.71000, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 1.9364 - accuracy: 0.2968 - val_loss: 1.7100 - val_accuracy: 0.3607 - lr: 1.0000e-04
Epoch 3/30
706/708 [============================>.] - ETA: 0s - loss: 1.7139 - accuracy: 0.3693
Epoch 3: val_loss improved from 1.71000 to 1.55323, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 1.7135 - accuracy: 0.3695 - val_loss: 1.5532 - val_accuracy: 0.4261 - lr: 1.0000e-04
Epoch 4/30
706/708 [============================>.] - ETA: 0s - loss: 1.5288 - accuracy: 0.4328
Epoch 4: val_loss improved from 1.55323 to 1.46643, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 1.5285 - accuracy: 0.4329 - val_loss: 1.4664 - val_accuracy: 0.4740 - lr: 1.0000e-04
Epoch 5/30
706/708 [============================>.] - ETA: 0s - loss: 1.3797 - accuracy: 0.4872
Epoch 5: val_loss improved from 1.46643 to 1.25800, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 1.3790 - accuracy: 0.4874 - val_loss: 1.2580 - val_accuracy: 0.5358 - lr: 1.0000e-04
Epoch 6/30
706/708 [============================>.] - ETA: 0s - loss: 1.2607 - accuracy: 0.5279
Epoch 6: val_loss improved from 1.25800 to 1.14641, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 1.2610 - accuracy: 0.5278 - val_loss: 1.1464 - val_accuracy: 0.5723 - lr: 1.0000e-04
Epoch 7/30
707/708 [============================>.] - ETA: 0s - loss: 1.1533 - accuracy: 0.5676
Epoch 7: val_loss improved from 1.14641 to 1.04183, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 1.1532 - accuracy: 0.5676 - val_loss: 1.0418 - val_accuracy: 0.6118 - lr: 1.0000e-04
Epoch 8/30
707/708 [============================>.] - ETA: 0s - loss: 1.0698 - accuracy: 0.6001
Epoch 8: val_loss improved from 1.04183 to 0.99490, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 1.0694 - accuracy: 0.6003 - val_loss: 0.9949 - val_accuracy: 0.6315 - lr: 1.0000e-04
Epoch 9/30
707/708 [============================>.] - ETA: 0s - loss: 1.0035 - accuracy: 0.6231
Epoch 9: val_loss improved from 0.99490 to 0.94240, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 1.0035 - accuracy: 0.6231 - val_loss: 0.9424 - val_accuracy: 0.6496 - lr: 1.0000e-04
Epoch 10/30
707/708 [============================>.] - ETA: 0s - loss: 0.9376 - accuracy: 0.6474
Epoch 10: val_loss improved from 0.94240 to 0.90611, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.9374 - accuracy: 0.6475 - val_loss: 0.9061 - val_accuracy: 0.6649 - lr: 1.0000e-04
Epoch 11/30
707/708 [============================>.] - ETA: 0s - loss: 0.8783 - accuracy: 0.6721
Epoch 11: val_loss improved from 0.90611 to 0.83498, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.8784 - accuracy: 0.6720 - val_loss: 0.8350 - val_accuracy: 0.6915 - lr: 1.0000e-04
Epoch 12/30
707/708 [============================>.] - ETA: 0s - loss: 0.8190 - accuracy: 0.6976
Epoch 12: val_loss improved from 0.83498 to 0.79002, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.8188 - accuracy: 0.6977 - val_loss: 0.7900 - val_accuracy: 0.7082 - lr: 1.0000e-04
Epoch 13/30
706/708 [============================>.] - ETA: 0s - loss: 0.7533 - accuracy: 0.7223
Epoch 13: val_loss improved from 0.79002 to 0.77463, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.7527 - accuracy: 0.7225 - val_loss: 0.7746 - val_accuracy: 0.7245 - lr: 1.0000e-04
Epoch 14/30
706/708 [============================>.] - ETA: 0s - loss: 0.7026 - accuracy: 0.7409
Epoch 14: val_loss improved from 0.77463 to 0.73334, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.7026 - accuracy: 0.7410 - val_loss: 0.7333 - val_accuracy: 0.7402 - lr: 1.0000e-04
Epoch 15/30
707/708 [============================>.] - ETA: 0s - loss: 0.6465 - accuracy: 0.7630
Epoch 15: val_loss improved from 0.73334 to 0.68552, saving model to best-model.h5
708/708 [==============================] - 20s 28ms/step - loss: 0.6463 - accuracy: 0.7632 - val_loss: 0.6855 - val_accuracy: 0.7612 - lr: 1.0000e-04
Epoch 16/30
707/708 [============================>.] - ETA: 0s - loss: 0.5960 - accuracy: 0.7830
Epoch 16: val_loss improved from 0.68552 to 0.66259, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.5964 - accuracy: 0.7828 - val_loss: 0.6626 - val_accuracy: 0.7727 - lr: 1.0000e-04
Epoch 17/30
707/708 [============================>.] - ETA: 0s - loss: 0.5496 - accuracy: 0.8021
Epoch 17: val_loss improved from 0.66259 to 0.64119, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.5498 - accuracy: 0.8021 - val_loss: 0.6412 - val_accuracy: 0.7868 - lr: 1.0000e-04
Epoch 18/30
707/708 [============================>.] - ETA: 0s - loss: 0.5069 - accuracy: 0.8181
Epoch 18: val_loss improved from 0.64119 to 0.61912, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.5068 - accuracy: 0.8182 - val_loss: 0.6191 - val_accuracy: 0.7942 - lr: 1.0000e-04
Epoch 19/30
707/708 [============================>.] - ETA: 0s - loss: 0.4644 - accuracy: 0.8329
Epoch 19: val_loss improved from 0.61912 to 0.61613, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.4644 - accuracy: 0.8329 - val_loss: 0.6161 - val_accuracy: 0.8023 - lr: 1.0000e-04
Epoch 20/30
708/708 [==============================] - ETA: 0s - loss: 0.4249 - accuracy: 0.8490
Epoch 20: val_loss improved from 0.61613 to 0.60232, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.4249 - accuracy: 0.8490 - val_loss: 0.6023 - val_accuracy: 0.8111 - lr: 1.0000e-04
Epoch 21/30
706/708 [============================>.] - ETA: 0s - loss: 0.3885 - accuracy: 0.8610
Epoch 21: val_loss improved from 0.60232 to 0.59095, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.3887 - accuracy: 0.8610 - val_loss: 0.5910 - val_accuracy: 0.8224 - lr: 1.0000e-04
Epoch 22/30
707/708 [============================>.] - ETA: 0s - loss: 0.3618 - accuracy: 0.8720
Epoch 22: val_loss did not improve from 0.59095
708/708 [==============================] - 19s 26ms/step - loss: 0.3618 - accuracy: 0.8720 - val_loss: 0.5977 - val_accuracy: 0.8254 - lr: 1.0000e-04
Epoch 23/30
707/708 [============================>.] - ETA: 0s - loss: 0.3344 - accuracy: 0.8808
Epoch 23: val_loss did not improve from 0.59095
708/708 [==============================] - 19s 26ms/step - loss: 0.3348 - accuracy: 0.8807 - val_loss: 0.5999 - val_accuracy: 0.8210 - lr: 1.0000e-04
Epoch 24/30
707/708 [============================>.] - ETA: 0s - loss: 0.3067 - accuracy: 0.8908
Epoch 24: val_loss did not improve from 0.59095

Epoch 24: ReduceLROnPlateau reducing learning rate to 3.1622775802825264e-05.
708/708 [==============================] - 19s 26ms/step - loss: 0.3067 - accuracy: 0.8908 - val_loss: 0.6152 - val_accuracy: 0.8345 - lr: 1.0000e-04
Epoch 25/30
707/708 [============================>.] - ETA: 0s - loss: 0.2517 - accuracy: 0.9106
Epoch 25: val_loss improved from 0.59095 to 0.58899, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.2517 - accuracy: 0.9106 - val_loss: 0.5890 - val_accuracy: 0.8375 - lr: 3.1623e-05
Epoch 26/30
707/708 [============================>.] - ETA: 0s - loss: 0.2240 - accuracy: 0.9222
Epoch 26: val_loss did not improve from 0.58899
708/708 [==============================] - 19s 26ms/step - loss: 0.2241 - accuracy: 0.9222 - val_loss: 0.5913 - val_accuracy: 0.8413 - lr: 3.1623e-05
Epoch 27/30
707/708 [============================>.] - ETA: 0s - loss: 0.2101 - accuracy: 0.9264
Epoch 27: val_loss did not improve from 0.58899
708/708 [==============================] - 19s 27ms/step - loss: 0.2102 - accuracy: 0.9263 - val_loss: 0.5945 - val_accuracy: 0.8458 - lr: 3.1623e-05
Epoch 28/30
707/708 [============================>.] - ETA: 0s - loss: 0.1973 - accuracy: 0.9309
Epoch 28: val_loss did not improve from 0.58899

Epoch 28: ReduceLROnPlateau reducing learning rate to 9.999999259090306e-06.
708/708 [==============================] - 19s 27ms/step - loss: 0.1973 - accuracy: 0.9309 - val_loss: 0.5969 - val_accuracy: 0.8423 - lr: 3.1623e-05
Epoch 29/30
706/708 [============================>.] - ETA: 0s - loss: 0.1821 - accuracy: 0.9349
Epoch 29: val_loss did not improve from 0.58899
708/708 [==============================] - 19s 26ms/step - loss: 0.1821 - accuracy: 0.9349 - val_loss: 0.6006 - val_accuracy: 0.8403 - lr: 1.0000e-05
Epoch 30/30
707/708 [============================>.] - ETA: 0s - loss: 0.1734 - accuracy: 0.9398Restoring model weights from the end of the best epoch: 27.

Epoch 30: val_loss did not improve from 0.58899
708/708 [==============================] - 19s 27ms/step - loss: 0.1736 - accuracy: 0.9398 - val_loss: 0.5989 - val_accuracy: 0.8423 - lr: 1.0000e-05
Epoch 30: early stopping
394/394 [==============================] - 3s 6ms/step - loss: 0.6234 - accuracy: 0.8416
Out[ ]:
[0.6234413385391235, 0.8416368961334229]
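
Model 2's batch normalisation and dropout pay off: validation loss keeps improving until epoch 25 (0.589), early stopping restores the epoch-27 weights, and test accuracy rises to 84.16%.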

CNN Model 2 with Leaky ReLU

In [ ]:
def model_3():
  model = models.Sequential()

  # 1st convolutional layer
  model.add(layers.Conv2D(64, kernel_size=3, strides=1, padding='same', activation=tf.keras.layers.LeakyReLU(), input_shape=(x_train.shape[1:])))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.25))

  # 2nd convolutional layer
  model.add(layers.Conv2D(128, kernel_size=5, strides=1, padding='same', activation=tf.keras.layers.LeakyReLU()))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.25))

  # 3rd convolutional layer
  model.add(layers.Conv2D(512, kernel_size=3, strides=1, padding='same', activation=tf.keras.layers.LeakyReLU()))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.25))

  # 4th convolutional layer
  model.add(layers.Conv2D(512, kernel_size=3, strides=1, padding='same', activation=tf.keras.layers.LeakyReLU()))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.25))
  model.add(layers.Flatten())

  # 1st fully connected dense layer
  model.add(layers.Dense(256, activation=tf.keras.layers.LeakyReLU()))
  model.add(layers.BatchNormalization())
  model.add(layers.Dropout(0.25))

  # 2nd fully connected dense layer
  model.add(layers.Dense(512, activation=tf.keras.layers.LeakyReLU()))
  model.add(layers.BatchNormalization())
  model.add(layers.Dropout(0.25))

  # final dense layer with number of classes
  model.add(layers.Dense(7, activation='softmax'))

  # compile the model
  model.compile(optimizer=optimizers.Adam(0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
  return model
In [ ]:
model = model_3()
plot_model(model, to_file="model.png", show_shapes=True, show_layer_names=True)
Out[ ]:
In [ ]:
history3 = train_model(model, 30, 64)
model.evaluate(x_test, y_test)
Epoch 1/30
708/708 [==============================] - ETA: 0s - loss: 2.1218 - accuracy: 0.2501
Epoch 1: val_loss improved from inf to 1.83007, saving model to best-model.h5
708/708 [==============================] - 31s 28ms/step - loss: 2.1218 - accuracy: 0.2501 - val_loss: 1.8301 - val_accuracy: 0.3192 - lr: 1.0000e-04
Epoch 2/30
707/708 [============================>.] - ETA: 0s - loss: 1.7720 - accuracy: 0.3515
Epoch 2: val_loss improved from 1.83007 to 1.65118, saving model to best-model.h5
708/708 [==============================] - 20s 29ms/step - loss: 1.7718 - accuracy: 0.3514 - val_loss: 1.6512 - val_accuracy: 0.3977 - lr: 1.0000e-04
Epoch 3/30
707/708 [============================>.] - ETA: 0s - loss: 1.5677 - accuracy: 0.4252
Epoch 3: val_loss improved from 1.65118 to 1.31767, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 1.5678 - accuracy: 0.4252 - val_loss: 1.3177 - val_accuracy: 0.5079 - lr: 1.0000e-04
Epoch 4/30
708/708 [==============================] - ETA: 0s - loss: 1.4088 - accuracy: 0.4773
Epoch 4: val_loss improved from 1.31767 to 1.27712, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 1.4088 - accuracy: 0.4773 - val_loss: 1.2771 - val_accuracy: 0.5282 - lr: 1.0000e-04
Epoch 5/30
707/708 [============================>.] - ETA: 0s - loss: 1.2937 - accuracy: 0.5212
Epoch 5: val_loss improved from 1.27712 to 1.19354, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 1.2935 - accuracy: 0.5213 - val_loss: 1.1935 - val_accuracy: 0.5556 - lr: 1.0000e-04
Epoch 6/30
707/708 [============================>.] - ETA: 0s - loss: 1.1957 - accuracy: 0.5550
Epoch 6: val_loss improved from 1.19354 to 1.08240, saving model to best-model.h5
708/708 [==============================] - 20s 28ms/step - loss: 1.1956 - accuracy: 0.5550 - val_loss: 1.0824 - val_accuracy: 0.5999 - lr: 1.0000e-04
Epoch 7/30
707/708 [============================>.] - ETA: 0s - loss: 1.1103 - accuracy: 0.5875
Epoch 7: val_loss improved from 1.08240 to 1.01645, saving model to best-model.h5
708/708 [==============================] - 20s 28ms/step - loss: 1.1101 - accuracy: 0.5876 - val_loss: 1.0165 - val_accuracy: 0.6307 - lr: 1.0000e-04
Epoch 8/30
707/708 [============================>.] - ETA: 0s - loss: 1.0421 - accuracy: 0.6109
Epoch 8: val_loss improved from 1.01645 to 0.95606, saving model to best-model.h5
708/708 [==============================] - 20s 28ms/step - loss: 1.0422 - accuracy: 0.6109 - val_loss: 0.9561 - val_accuracy: 0.6395 - lr: 1.0000e-04
Epoch 9/30
707/708 [============================>.] - ETA: 0s - loss: 0.9763 - accuracy: 0.6356
Epoch 9: val_loss improved from 0.95606 to 0.90434, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.9765 - accuracy: 0.6355 - val_loss: 0.9043 - val_accuracy: 0.6669 - lr: 1.0000e-04
Epoch 10/30
707/708 [============================>.] - ETA: 0s - loss: 0.9133 - accuracy: 0.6594
Epoch 10: val_loss improved from 0.90434 to 0.87893, saving model to best-model.h5
708/708 [==============================] - 20s 28ms/step - loss: 0.9131 - accuracy: 0.6595 - val_loss: 0.8789 - val_accuracy: 0.6756 - lr: 1.0000e-04
Epoch 11/30
707/708 [============================>.] - ETA: 0s - loss: 0.8608 - accuracy: 0.6811
Epoch 11: val_loss improved from 0.87893 to 0.83538, saving model to best-model.h5
708/708 [==============================] - 20s 28ms/step - loss: 0.8609 - accuracy: 0.6810 - val_loss: 0.8354 - val_accuracy: 0.6953 - lr: 1.0000e-04
Epoch 12/30
707/708 [============================>.] - ETA: 0s - loss: 0.8075 - accuracy: 0.7013
Epoch 12: val_loss improved from 0.83538 to 0.83097, saving model to best-model.h5
708/708 [==============================] - 19s 28ms/step - loss: 0.8076 - accuracy: 0.7012 - val_loss: 0.8310 - val_accuracy: 0.7050 - lr: 1.0000e-04
Epoch 13/30
707/708 [============================>.] - ETA: 0s - loss: 0.7508 - accuracy: 0.7258
Epoch 13: val_loss improved from 0.83097 to 0.75781, saving model to best-model.h5
708/708 [==============================] - 20s 28ms/step - loss: 0.7508 - accuracy: 0.7258 - val_loss: 0.7578 - val_accuracy: 0.7318 - lr: 1.0000e-04
Epoch 14/30
707/708 [============================>.] - ETA: 0s - loss: 0.7057 - accuracy: 0.7425
Epoch 14: val_loss did not improve from 0.75781
708/708 [==============================] - 19s 27ms/step - loss: 0.7055 - accuracy: 0.7425 - val_loss: 0.7698 - val_accuracy: 0.7288 - lr: 1.0000e-04
Epoch 15/30
707/708 [============================>.] - ETA: 0s - loss: 0.6581 - accuracy: 0.7579
Epoch 15: val_loss improved from 0.75781 to 0.70808, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.6579 - accuracy: 0.7580 - val_loss: 0.7081 - val_accuracy: 0.7582 - lr: 1.0000e-04
Epoch 16/30
707/708 [============================>.] - ETA: 0s - loss: 0.6108 - accuracy: 0.7788
Epoch 16: val_loss did not improve from 0.70808
708/708 [==============================] - 19s 27ms/step - loss: 0.6109 - accuracy: 0.7786 - val_loss: 0.7470 - val_accuracy: 0.7481 - lr: 1.0000e-04
Epoch 17/30
707/708 [============================>.] - ETA: 0s - loss: 0.5711 - accuracy: 0.7948
Epoch 17: val_loss improved from 0.70808 to 0.70717, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.5714 - accuracy: 0.7947 - val_loss: 0.7072 - val_accuracy: 0.7642 - lr: 1.0000e-04
Epoch 18/30
707/708 [============================>.] - ETA: 0s - loss: 0.5330 - accuracy: 0.8055
Epoch 18: val_loss improved from 0.70717 to 0.66166, saving model to best-model.h5
708/708 [==============================] - 19s 28ms/step - loss: 0.5333 - accuracy: 0.8054 - val_loss: 0.6617 - val_accuracy: 0.7872 - lr: 1.0000e-04
Epoch 19/30
707/708 [============================>.] - ETA: 0s - loss: 0.4945 - accuracy: 0.8224
Epoch 19: val_loss did not improve from 0.66166
708/708 [==============================] - 19s 27ms/step - loss: 0.4946 - accuracy: 0.8224 - val_loss: 0.6820 - val_accuracy: 0.7863 - lr: 1.0000e-04
Epoch 20/30
707/708 [============================>.] - ETA: 0s - loss: 0.4651 - accuracy: 0.8329
Epoch 20: val_loss did not improve from 0.66166
708/708 [==============================] - 19s 27ms/step - loss: 0.4649 - accuracy: 0.8330 - val_loss: 0.6723 - val_accuracy: 0.7926 - lr: 1.0000e-04
Epoch 21/30
708/708 [==============================] - ETA: 0s - loss: 0.4408 - accuracy: 0.8440
Epoch 21: val_loss improved from 0.66166 to 0.65541, saving model to best-model.h5
708/708 [==============================] - 19s 28ms/step - loss: 0.4408 - accuracy: 0.8440 - val_loss: 0.6554 - val_accuracy: 0.8012 - lr: 1.0000e-04
Epoch 22/30
707/708 [============================>.] - ETA: 0s - loss: 0.4069 - accuracy: 0.8554
Epoch 22: val_loss did not improve from 0.65541
708/708 [==============================] - 19s 27ms/step - loss: 0.4069 - accuracy: 0.8554 - val_loss: 0.6766 - val_accuracy: 0.7996 - lr: 1.0000e-04
Epoch 23/30
707/708 [============================>.] - ETA: 0s - loss: 0.3727 - accuracy: 0.8666
Epoch 23: val_loss improved from 0.65541 to 0.63402, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.3725 - accuracy: 0.8667 - val_loss: 0.6340 - val_accuracy: 0.8125 - lr: 1.0000e-04
Epoch 24/30
707/708 [============================>.] - ETA: 0s - loss: 0.3521 - accuracy: 0.8752
Epoch 24: val_loss did not improve from 0.63402
708/708 [==============================] - 19s 27ms/step - loss: 0.3522 - accuracy: 0.8752 - val_loss: 0.6514 - val_accuracy: 0.8113 - lr: 1.0000e-04
Epoch 25/30
707/708 [============================>.] - ETA: 0s - loss: 0.3358 - accuracy: 0.8824
Epoch 25: val_loss improved from 0.63402 to 0.61536, saving model to best-model.h5
708/708 [==============================] - 19s 28ms/step - loss: 0.3356 - accuracy: 0.8825 - val_loss: 0.6154 - val_accuracy: 0.8196 - lr: 1.0000e-04
Epoch 26/30
707/708 [============================>.] - ETA: 0s - loss: 0.3156 - accuracy: 0.8876
Epoch 26: val_loss did not improve from 0.61536
708/708 [==============================] - 19s 27ms/step - loss: 0.3155 - accuracy: 0.8876 - val_loss: 0.6564 - val_accuracy: 0.8149 - lr: 1.0000e-04
Epoch 27/30
707/708 [============================>.] - ETA: 0s - loss: 0.2953 - accuracy: 0.8956
Epoch 27: val_loss did not improve from 0.61536
708/708 [==============================] - 19s 27ms/step - loss: 0.2954 - accuracy: 0.8955 - val_loss: 0.6456 - val_accuracy: 0.8220 - lr: 1.0000e-04
Epoch 28/30
707/708 [============================>.] - ETA: 0s - loss: 0.2780 - accuracy: 0.9001
Epoch 28: val_loss did not improve from 0.61536

Epoch 28: ReduceLROnPlateau reducing learning rate to 3.1622775802825264e-05.
708/708 [==============================] - 19s 27ms/step - loss: 0.2778 - accuracy: 0.9002 - val_loss: 0.6661 - val_accuracy: 0.8184 - lr: 1.0000e-04
Epoch 29/30
707/708 [============================>.] - ETA: 0s - loss: 0.2288 - accuracy: 0.9198
Epoch 29: val_loss did not improve from 0.61536
708/708 [==============================] - 19s 27ms/step - loss: 0.2287 - accuracy: 0.9198 - val_loss: 0.6326 - val_accuracy: 0.8292 - lr: 3.1623e-05
Epoch 30/30
707/708 [============================>.] - ETA: 0s - loss: 0.2092 - accuracy: 0.9271
Epoch 30: val_loss did not improve from 0.61536
708/708 [==============================] - 19s 27ms/step - loss: 0.2092 - accuracy: 0.9271 - val_loss: 0.6329 - val_accuracy: 0.8333 - lr: 3.1623e-05
394/394 [==============================] - 3s 6ms/step - loss: 0.6354 - accuracy: 0.8319
Out[ ]:
[0.6354373097419739, 0.8318633437156677]
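
Swapping ReLU for Leaky ReLU brings no gain here: 83.19% test accuracy versus 84.16% for the original Model 2.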

CNN Model 2 with Data Augmentation

In [49]:
train_datagen = ImageDataGenerator(
    rotation_range=35,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.4,
    zoom_range=0.4,
    fill_mode='nearest'
)

plot_augmentation_samples(train_datagen)
Data Augmentation Samples has been saved
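
Like train_model, the train_augmented_model helper is defined earlier in the notebook. A minimal sketch consistent with the logs below (353 steps per epoch = 45304 // 128, and the same callbacks as before; the exact parameters are assumptions):

def train_augmented_model(model, datagen, epochs, batch_size):
  # NOTE: reconstruction sketch — streams randomly augmented batches from the generator.
  callbacks = [
      ModelCheckpoint('best-model.h5', monitor='val_loss', save_best_only=True, verbose=1),
      ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.1), patience=3, verbose=1),
      EarlyStopping(monitor='val_accuracy', patience=3, restore_best_weights=True, verbose=1),
  ]
  return model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
                   steps_per_epoch=len(x_train) // batch_size,
                   epochs=epochs,
                   validation_data=(x_val, y_val),
                   callbacks=callbacks)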
In [ ]:
model = model_2()

history4 = train_augmented_model(model, train_datagen, 80, 128)
model.evaluate(x_test, y_test)
Epoch 1/80
352/353 [============================>.] - ETA: 0s - loss: 2.4151 - accuracy: 0.1679
Epoch 1: val_loss improved from inf to 2.17855, saving model to best-model.h5
353/353 [==============================] - 18s 48ms/step - loss: 2.4149 - accuracy: 0.1679 - val_loss: 2.1786 - val_accuracy: 0.1263 - lr: 1.0000e-04
Epoch 2/80
352/353 [============================>.] - ETA: 0s - loss: 2.1947 - accuracy: 0.1784
Epoch 2: val_loss improved from 2.17855 to 2.04914, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 2.1945 - accuracy: 0.1785 - val_loss: 2.0491 - val_accuracy: 0.1780 - lr: 1.0000e-04
Epoch 3/80
352/353 [============================>.] - ETA: 0s - loss: 2.1224 - accuracy: 0.1852
Epoch 3: val_loss improved from 2.04914 to 1.90840, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 2.1224 - accuracy: 0.1853 - val_loss: 1.9084 - val_accuracy: 0.2304 - lr: 1.0000e-04
Epoch 4/80
352/353 [============================>.] - ETA: 0s - loss: 2.0820 - accuracy: 0.1900
Epoch 4: val_loss improved from 1.90840 to 1.85000, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 2.0817 - accuracy: 0.1900 - val_loss: 1.8500 - val_accuracy: 0.2584 - lr: 1.0000e-04
Epoch 5/80
353/353 [==============================] - ETA: 0s - loss: 2.0474 - accuracy: 0.1988
Epoch 5: val_loss improved from 1.85000 to 1.84210, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 2.0474 - accuracy: 0.1988 - val_loss: 1.8421 - val_accuracy: 0.2576 - lr: 1.0000e-04
Epoch 6/80
353/353 [==============================] - ETA: 0s - loss: 2.0189 - accuracy: 0.2052
Epoch 6: val_loss did not improve from 1.84210
353/353 [==============================] - 17s 48ms/step - loss: 2.0189 - accuracy: 0.2052 - val_loss: 1.8569 - val_accuracy: 0.2586 - lr: 1.0000e-04
Epoch 7/80
353/353 [==============================] - ETA: 0s - loss: 1.9859 - accuracy: 0.2183
Epoch 7: val_loss improved from 1.84210 to 1.79480, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 1.9859 - accuracy: 0.2183 - val_loss: 1.7948 - val_accuracy: 0.2863 - lr: 1.0000e-04
Epoch 8/80
353/353 [==============================] - ETA: 0s - loss: 1.9596 - accuracy: 0.2279
Epoch 8: val_loss improved from 1.79480 to 1.78370, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.9596 - accuracy: 0.2279 - val_loss: 1.7837 - val_accuracy: 0.2938 - lr: 1.0000e-04
Epoch 9/80
352/353 [============================>.] - ETA: 0s - loss: 1.9354 - accuracy: 0.2398
Epoch 9: val_loss improved from 1.78370 to 1.77796, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.9354 - accuracy: 0.2398 - val_loss: 1.7780 - val_accuracy: 0.3000 - lr: 1.0000e-04
Epoch 10/80
352/353 [============================>.] - ETA: 0s - loss: 1.9080 - accuracy: 0.2507
Epoch 10: val_loss improved from 1.77796 to 1.77499, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.9079 - accuracy: 0.2508 - val_loss: 1.7750 - val_accuracy: 0.3063 - lr: 1.0000e-04
Epoch 11/80
353/353 [==============================] - ETA: 0s - loss: 1.8712 - accuracy: 0.2653
Epoch 11: val_loss improved from 1.77499 to 1.71581, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.8712 - accuracy: 0.2653 - val_loss: 1.7158 - val_accuracy: 0.3216 - lr: 1.0000e-04
Epoch 12/80
352/353 [============================>.] - ETA: 0s - loss: 1.8310 - accuracy: 0.2789
Epoch 12: val_loss improved from 1.71581 to 1.70240, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.8312 - accuracy: 0.2791 - val_loss: 1.7024 - val_accuracy: 0.3453 - lr: 1.0000e-04
Epoch 13/80
353/353 [==============================] - ETA: 0s - loss: 1.8039 - accuracy: 0.2915
Epoch 13: val_loss improved from 1.70240 to 1.66093, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.8039 - accuracy: 0.2915 - val_loss: 1.6609 - val_accuracy: 0.3530 - lr: 1.0000e-04
Epoch 14/80
352/353 [============================>.] - ETA: 0s - loss: 1.7600 - accuracy: 0.3138
Epoch 14: val_loss improved from 1.66093 to 1.64650, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.7599 - accuracy: 0.3138 - val_loss: 1.6465 - val_accuracy: 0.3884 - lr: 1.0000e-04
Epoch 15/80
352/353 [============================>.] - ETA: 0s - loss: 1.7225 - accuracy: 0.3311
Epoch 15: val_loss did not improve from 1.64650
353/353 [==============================] - 17s 48ms/step - loss: 1.7221 - accuracy: 0.3313 - val_loss: 1.7554 - val_accuracy: 0.3758 - lr: 1.0000e-04
Epoch 16/80
353/353 [==============================] - ETA: 0s - loss: 1.6864 - accuracy: 0.3464
Epoch 16: val_loss improved from 1.64650 to 1.53418, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.6864 - accuracy: 0.3464 - val_loss: 1.5342 - val_accuracy: 0.4319 - lr: 1.0000e-04
Epoch 17/80
353/353 [==============================] - ETA: 0s - loss: 1.6479 - accuracy: 0.3628
Epoch 17: val_loss did not improve from 1.53418
353/353 [==============================] - 17s 48ms/step - loss: 1.6479 - accuracy: 0.3628 - val_loss: 1.5923 - val_accuracy: 0.4154 - lr: 1.0000e-04
Epoch 18/80
352/353 [============================>.] - ETA: 0s - loss: 1.6029 - accuracy: 0.3839
Epoch 18: val_loss improved from 1.53418 to 1.48758, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.6031 - accuracy: 0.3838 - val_loss: 1.4876 - val_accuracy: 0.4507 - lr: 1.0000e-04
Epoch 19/80
352/353 [============================>.] - ETA: 0s - loss: 1.5732 - accuracy: 0.3981
Epoch 19: val_loss improved from 1.48758 to 1.42675, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.5731 - accuracy: 0.3982 - val_loss: 1.4267 - val_accuracy: 0.4692 - lr: 1.0000e-04
Epoch 20/80
353/353 [==============================] - ETA: 0s - loss: 1.5424 - accuracy: 0.4076
Epoch 20: val_loss improved from 1.42675 to 1.42401, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.5424 - accuracy: 0.4076 - val_loss: 1.4240 - val_accuracy: 0.4760 - lr: 1.0000e-04
Epoch 21/80
352/353 [============================>.] - ETA: 0s - loss: 1.5160 - accuracy: 0.4187
Epoch 21: val_loss improved from 1.42401 to 1.30537, saving model to best-model.h5
353/353 [==============================] - 21s 61ms/step - loss: 1.5158 - accuracy: 0.4189 - val_loss: 1.3054 - val_accuracy: 0.5139 - lr: 1.0000e-04
Epoch 22/80
353/353 [==============================] - ETA: 0s - loss: 1.4908 - accuracy: 0.4288
Epoch 22: val_loss improved from 1.30537 to 1.28653, saving model to best-model.h5
353/353 [==============================] - 20s 56ms/step - loss: 1.4908 - accuracy: 0.4288 - val_loss: 1.2865 - val_accuracy: 0.5226 - lr: 1.0000e-04
Epoch 23/80
353/353 [==============================] - ETA: 0s - loss: 1.4651 - accuracy: 0.4389
Epoch 23: val_loss improved from 1.28653 to 1.27115, saving model to best-model.h5
353/353 [==============================] - 20s 56ms/step - loss: 1.4651 - accuracy: 0.4389 - val_loss: 1.2712 - val_accuracy: 0.5242 - lr: 1.0000e-04
Epoch 24/80
352/353 [============================>.] - ETA: 0s - loss: 1.4444 - accuracy: 0.4489
Epoch 24: val_loss improved from 1.27115 to 1.16613, saving model to best-model.h5
353/353 [==============================] - 19s 55ms/step - loss: 1.4446 - accuracy: 0.4489 - val_loss: 1.1661 - val_accuracy: 0.5626 - lr: 1.0000e-04
Epoch 25/80
352/353 [============================>.] - ETA: 0s - loss: 1.4178 - accuracy: 0.4587
Epoch 25: val_loss did not improve from 1.16613
353/353 [==============================] - 20s 57ms/step - loss: 1.4177 - accuracy: 0.4587 - val_loss: 1.2729 - val_accuracy: 0.5290 - lr: 1.0000e-04
Epoch 26/80
352/353 [============================>.] - ETA: 0s - loss: 1.4027 - accuracy: 0.4650
Epoch 26: val_loss improved from 1.16613 to 1.14209, saving model to best-model.h5
353/353 [==============================] - 20s 58ms/step - loss: 1.4028 - accuracy: 0.4649 - val_loss: 1.1421 - val_accuracy: 0.5747 - lr: 1.0000e-04
Epoch 27/80
353/353 [==============================] - ETA: 0s - loss: 1.3786 - accuracy: 0.4775
Epoch 27: val_loss did not improve from 1.14209
353/353 [==============================] - 20s 58ms/step - loss: 1.3786 - accuracy: 0.4775 - val_loss: 1.1561 - val_accuracy: 0.5675 - lr: 1.0000e-04
Epoch 28/80
353/353 [==============================] - ETA: 0s - loss: 1.3579 - accuracy: 0.4820
Epoch 28: val_loss did not improve from 1.14209
353/353 [==============================] - 17s 49ms/step - loss: 1.3579 - accuracy: 0.4820 - val_loss: 1.1442 - val_accuracy: 0.5739 - lr: 1.0000e-04
Epoch 29/80
353/353 [==============================] - ETA: 0s - loss: 1.3470 - accuracy: 0.4910Restoring model weights from the end of the best epoch: 26.

Epoch 29: val_loss improved from 1.14209 to 1.13374, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.3470 - accuracy: 0.4910 - val_loss: 1.1337 - val_accuracy: 0.5725 - lr: 1.0000e-04
Epoch 29: early stopping
394/394 [==============================] - 2s 6ms/step - loss: 1.1341 - accuracy: 0.5739
Out[ ]:
[1.134113073348999, 0.5738577842712402]
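
With this augmentation strength the model learns far more slowly: training early-stops at epoch 29 with only 57.39% test accuracy, well below the un-augmented Model 2, at least within this training budget.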

Initial Results

In [43]:
plot_model_history(history1, name="CNN Model 1")
Min validation loss: 0.9167398810386658 
Max validation loss: 1.5393399000167847 
Min validation acc: 0.41815653443336487 
Max validation acc: 0.7904251217842102
In [ ]:
plot_model_history(history2, name="CNN Model 2")
Min validation loss: 0.5889896154403687 
Max validation loss: 2.0088906288146973 
Min validation acc: 0.24374255537986755 
Max validation acc: 0.8458482027053833
In [ ]:
plot_model_history(history3, name="CNN Model 2 with Leaky ReLU")
Min validation loss: 0.6021990776062012 
Max validation loss: 2.0196313858032227 
Min validation acc: 0.2445371448993683 
Max validation acc: 0.8373063206672668
In [ ]:
plot_model_history(history4, name="CNN Model 2 with Data Augmentation")
Min validation loss: 1.1337430477142334 
Max validation loss: 2.1785547733306885 
Min validation acc: 0.12634088099002838 
Max validation acc: 0.5746920704841614
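
For reference, the final test-set evaluations of the four models:

Model                                 Test loss    Test accuracy
CNN Model 1                           1.1413       80.24%
CNN Model 2                           0.6234       84.16%
CNN Model 2 with Leaky ReLU           0.6354       83.19%
CNN Model 2 with Data Augmentation    1.1341       57.39%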

After analysing all of the results, Model 2 has proven to be the most promising; it is therefore shortlisted for fine-tuning.

Hyperparameter Optimisation

Tuning the activation functions and pooling types

In [ ]:
def create_model(pool_type='max', conv_activation='relu', dropout_rate=0.25):
  model = models.Sequential()

  # 1st convolutional layer
  model.add(layers.Conv2D(64, kernel_size=3, strides=1, padding='same', activation=conv_activation, input_shape=(x_train.shape[1:])))
  model.add(layers.BatchNormalization())
  if pool_type == 'max':
    model.add(layers.MaxPooling2D((2, 2)))
  if pool_type == 'average':
    model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))    

  # 2nd convolutional layer
  model.add(layers.Conv2D(128, kernel_size=5, strides=1, padding='same', activation=conv_activation))
  model.add(layers.BatchNormalization())
  if pool_type == 'max':
    model.add(layers.MaxPooling2D((2, 2)))
  if pool_type == 'average':
    model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # 3rd convolutional layer
  model.add(layers.Conv2D(256, kernel_size=3, strides=1, padding='same', activation=conv_activation))
  model.add(layers.BatchNormalization())
  if pool_type == 'max':
    model.add(layers.MaxPooling2D((2, 2)))
  if pool_type == 'average':
    model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # 4th convolutional layer
  model.add(layers.Conv2D(256, kernel_size=3, strides=1, padding='same', activation=conv_activation))
  model.add(layers.BatchNormalization())
  if pool_type == 'max':  
    model.add(layers.MaxPooling2D((2, 2)))
  if pool_type == 'average':
    model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  model.add(layers.Flatten())

  # 1st fully connected dense layer
  model.add(layers.Dense(256, activation=conv_activation))
  model.add(layers.BatchNormalization())
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # 2nd fully connected dense layer
  model.add(layers.Dense(256, activation=conv_activation))
  model.add(layers.BatchNormalization())
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # final dense layer with number of classes
  model.add(layers.Dense(7, activation='softmax'))

  # compile the model
  model.compile(optimizer=optimizers.Adam(0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
  return model
In [ ]:
# wrapping the model-building function in KerasClassifier so it can be used with scikit-learn
model = KerasClassifier(build_fn=create_model, pool_type='max', conv_activation='relu', dropout_rate=0.25, verbose=1)

# specifying the hyperparameters to be tuned during the grid search 
param_grid = {
    'pool_type': ['max', 'average'],
    'conv_activation': ['relu', tf.keras.layers.LeakyReLU(), 'tanh'],    
    'epochs': [30],
}

# fitting the model and evaluating the results
grid_searcher = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)
grid_searcher_result = grid_searcher.fit(x_train, y_train)
plot_grid_search_results(grid_searcher_result)
/usr/local/lib/python3.7/dist-packages/joblib/externals/loky/process_executor.py:705: UserWarning: A worker stopped while some jobs were given to the executor. This can be caused by a too short worker timeout or by a memory leak.
  "timeout or by a memory leak.", UserWarning
/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py:290: UserWarning: ``build_fn`` will be renamed to ``model`` in a future release, at which point use of ``build_fn`` will raise an Error instead.
  "``build_fn`` will be renamed to ``model`` in a future release,"
Epoch 1/30
1416/1416 [==============================] - 18s 11ms/step - loss: 2.2184 - accuracy: 0.2192
Epoch 2/30
1416/1416 [==============================] - 15s 10ms/step - loss: 1.8781 - accuracy: 0.3062
Epoch 3/30
1416/1416 [==============================] - 15s 10ms/step - loss: 1.6445 - accuracy: 0.3893
Epoch 4/30
1416/1416 [==============================] - 15s 10ms/step - loss: 1.4567 - accuracy: 0.4571
Epoch 5/30
1416/1416 [==============================] - 15s 10ms/step - loss: 1.3201 - accuracy: 0.5069
Epoch 6/30
1416/1416 [==============================] - 15s 10ms/step - loss: 1.2144 - accuracy: 0.5461
Epoch 7/30
1416/1416 [==============================] - 15s 10ms/step - loss: 1.1278 - accuracy: 0.5776
Epoch 8/30
1416/1416 [==============================] - 15s 10ms/step - loss: 1.0592 - accuracy: 0.6029
Epoch 9/30
1416/1416 [==============================] - 15s 10ms/step - loss: 1.0053 - accuracy: 0.6250
Epoch 10/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.9522 - accuracy: 0.6444
Epoch 11/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.9075 - accuracy: 0.6607
Epoch 12/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.8627 - accuracy: 0.6782
Epoch 13/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.8262 - accuracy: 0.6938
Epoch 14/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.7885 - accuracy: 0.7070
Epoch 15/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.7523 - accuracy: 0.7236
Epoch 16/30
1416/1416 [==============================] - 15s 11ms/step - loss: 0.7189 - accuracy: 0.7374
Epoch 17/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.6873 - accuracy: 0.7483
Epoch 18/30
1416/1416 [==============================] - 15s 11ms/step - loss: 0.6593 - accuracy: 0.7576
Epoch 19/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.6233 - accuracy: 0.7750
Epoch 20/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.6011 - accuracy: 0.7815
Epoch 21/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.5733 - accuracy: 0.7929
Epoch 22/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.5470 - accuracy: 0.7998
Epoch 23/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.5272 - accuracy: 0.8117
Epoch 24/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.5046 - accuracy: 0.8180
Epoch 25/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.4824 - accuracy: 0.8254
Epoch 26/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.4662 - accuracy: 0.8337
Epoch 27/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.4502 - accuracy: 0.8398
Epoch 28/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.4390 - accuracy: 0.8434
Epoch 29/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.4211 - accuracy: 0.8478
Epoch 30/30
1416/1416 [==============================] - 15s 10ms/step - loss: 0.4092 - accuracy: 0.8535

Best score = 0.76 using {'conv_activation': 'relu', 'epochs': 30, 'pool_type': 'average'}

394/394 [==============================] - 2s 3ms/step
mean test accuracy +/- std = 0.7515 +/- 0.0096 with: {'conv_activation': 'relu', 'epochs': 30, 'pool_type': 'max'}
mean test accuracy +/- std = 0.7629 +/- 0.0031 with: {'conv_activation': 'relu', 'epochs': 30, 'pool_type': 'average'}
mean test accuracy +/- std = 0.7486 +/- 0.0021 with: {'conv_activation': <keras.layers.advanced_activations.LeakyReLU object at 0x7fdc431aad50>, 'epochs': 30, 'pool_type': 'max'}
mean test accuracy +/- std = 0.6814 +/- 0.0067 with: {'conv_activation': <keras.layers.advanced_activations.LeakyReLU object at 0x7fdc431aad50>, 'epochs': 30, 'pool_type': 'average'}
mean test accuracy +/- std = 0.6862 +/- 0.0048 with: {'conv_activation': 'tanh', 'epochs': 30, 'pool_type': 'max'}
mean test accuracy +/- std = 0.7000 +/- 0.0054 with: {'conv_activation': 'tanh', 'epochs': 30, 'pool_type': 'average'}
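
The printed summary comes from the plot_grid_search_results helper defined earlier (which presumably also plots the scores). A minimal sketch that reproduces the textual part of this output — names assumed — is:

def plot_grid_search_results(result):
  # NOTE: reconstruction sketch based on the printed output above.
  print("Best score = {:.2f} using {}\n".format(result.best_score_, result.best_params_))
  means = result.cv_results_['mean_test_score']
  stds = result.cv_results_['std_test_score']
  for mean, std, params in zip(means, stds, result.cv_results_['params']):
    print("mean test accuracy +/- std = {:.4f} +/- {:.4f} with: {}".format(mean, std, params))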

Accuracy achieved on the best model: 81.89%
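
Since ReLU with average pooling performed best, those choices are fixed in the next round, which tunes kernel size, dropout rate and batch size instead.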

Tuning the kernel size and dropout regularisation

In [ ]:
def create_model(kernel_size=3, dropout_rate=0.25):
  model = models.Sequential()

  # 1st convolutional layer
  model.add(layers.Conv2D(64, kernel_size=kernel_size, strides=1, padding='same', activation='relu', input_shape=(x_train.shape[1:])))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))    

  # 2nd convolutional layer
  model.add(layers.Conv2D(128, kernel_size=5, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # 3rd convolutional layer
  model.add(layers.Conv2D(256, kernel_size=kernel_size, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # 4th convolutional layer
  model.add(layers.Conv2D(256, kernel_size=kernel_size, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  model.add(layers.Flatten())

  # 1st fully connected dense layer
  model.add(layers.Dense(256, activation='relu'))
  model.add(layers.BatchNormalization())
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # 2nd fully connected dense layer
  model.add(layers.Dense(256, activation='relu'))
  model.add(layers.BatchNormalization())
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # final dense layer with number of classes
  model.add(layers.Dense(7, activation='softmax'))

  # compile the model
  model.compile(optimizer=optimizers.Adam(0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
  return model
In [ ]:
# wrapping the model-building function in a KerasClassifier so it can be used with scikit-learn
# (scikeras still accepts the deprecated build_fn argument, hence the warning in the output below)
model = KerasClassifier(build_fn=create_model, kernel_size=3, dropout_rate=0.25, verbose=1)

# specifying the hyperparameters to be tuned during the grid search 
param_grid = {
    'kernel_size': [3, 5, 7],
    'dropout_rate': [0.2, 0.25, 0.3, 0.35],
    'epochs': [30],
    'batch_size': [64, 128],
}

# running the grid search and plotting the results
grid_searcher = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)
grid_searcher_result = grid_searcher.fit(x_train, y_train)
plot_grid_search_results(grid_searcher_result)
/usr/local/lib/python3.7/dist-packages/joblib/externals/loky/process_executor.py:705: UserWarning: A worker stopped while some jobs were given to the executor. This can be caused by a too short worker timeout or by a memory leak.
  "timeout or by a memory leak.", UserWarning
/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py:290: UserWarning: ``build_fn`` will be renamed to ``model`` in a future release, at which point use of ``build_fn`` will raise an Error instead.
  "``build_fn`` will be renamed to ``model`` in a future release,"
Epoch 1/30
354/354 [==============================] - 11s 25ms/step - loss: 2.1357 - accuracy: 0.2430
Epoch 2/30
354/354 [==============================] - 8s 24ms/step - loss: 1.7797 - accuracy: 0.3546
Epoch 3/30
354/354 [==============================] - 8s 24ms/step - loss: 1.5331 - accuracy: 0.4419
Epoch 4/30
354/354 [==============================] - 8s 24ms/step - loss: 1.3613 - accuracy: 0.4999
Epoch 5/30
354/354 [==============================] - 8s 24ms/step - loss: 1.2353 - accuracy: 0.5421
Epoch 6/30
354/354 [==============================] - 8s 24ms/step - loss: 1.1381 - accuracy: 0.5813
Epoch 7/30
354/354 [==============================] - 8s 24ms/step - loss: 1.0502 - accuracy: 0.6088
Epoch 8/30
354/354 [==============================] - 8s 24ms/step - loss: 0.9773 - accuracy: 0.6392
Epoch 9/30
354/354 [==============================] - 8s 24ms/step - loss: 0.9136 - accuracy: 0.6638
Epoch 10/30
354/354 [==============================] - 8s 24ms/step - loss: 0.8483 - accuracy: 0.6879
Epoch 11/30
354/354 [==============================] - 8s 24ms/step - loss: 0.7951 - accuracy: 0.7080
Epoch 12/30
354/354 [==============================] - 8s 24ms/step - loss: 0.7383 - accuracy: 0.7301
Epoch 13/30
354/354 [==============================] - 8s 24ms/step - loss: 0.6926 - accuracy: 0.7489
Epoch 14/30
354/354 [==============================] - 8s 24ms/step - loss: 0.6388 - accuracy: 0.7697
Epoch 15/30
354/354 [==============================] - 8s 24ms/step - loss: 0.5879 - accuracy: 0.7885
Epoch 16/30
354/354 [==============================] - 8s 24ms/step - loss: 0.5482 - accuracy: 0.8049
Epoch 17/30
354/354 [==============================] - 8s 24ms/step - loss: 0.4958 - accuracy: 0.8241
Epoch 18/30
354/354 [==============================] - 8s 24ms/step - loss: 0.4542 - accuracy: 0.8385
Epoch 19/30
354/354 [==============================] - 8s 24ms/step - loss: 0.4140 - accuracy: 0.8532
Epoch 20/30
354/354 [==============================] - 8s 24ms/step - loss: 0.3834 - accuracy: 0.8642
Epoch 21/30
354/354 [==============================] - 8s 24ms/step - loss: 0.3483 - accuracy: 0.8770
Epoch 22/30
354/354 [==============================] - 8s 24ms/step - loss: 0.3257 - accuracy: 0.8841
Epoch 23/30
354/354 [==============================] - 8s 24ms/step - loss: 0.3013 - accuracy: 0.8942
Epoch 24/30
354/354 [==============================] - 8s 24ms/step - loss: 0.2706 - accuracy: 0.9050
Epoch 25/30
354/354 [==============================] - 8s 24ms/step - loss: 0.2500 - accuracy: 0.9106
Epoch 26/30
354/354 [==============================] - 8s 24ms/step - loss: 0.2282 - accuracy: 0.9208
Epoch 27/30
354/354 [==============================] - 8s 24ms/step - loss: 0.2141 - accuracy: 0.9255
Epoch 28/30
354/354 [==============================] - 8s 24ms/step - loss: 0.2019 - accuracy: 0.9307
Epoch 29/30
354/354 [==============================] - 8s 24ms/step - loss: 0.1895 - accuracy: 0.9342
Epoch 30/30
354/354 [==============================] - 8s 24ms/step - loss: 0.1757 - accuracy: 0.9399
Best score = 0.77 using {'batch_size': 128, 'dropout_rate': 0.2, 'epochs': 30, 'kernel_size': 5}

99/99 [==============================] - 1s 8ms/step
mean test accuracy +/- std = 0.7654 +/- 0.0032 with: {'batch_size': 64, 'dropout_rate': 0.2, 'epochs': 30, 'kernel_size': 3}
mean test accuracy +/- std = 0.7696 +/- 0.0012 with: {'batch_size': 64, 'dropout_rate': 0.2, 'epochs': 30, 'kernel_size': 5}
mean test accuracy +/- std = 0.7603 +/- 0.0047 with: {'batch_size': 64, 'dropout_rate': 0.2, 'epochs': 30, 'kernel_size': 7}
mean test accuracy +/- std = 0.7426 +/- 0.0040 with: {'batch_size': 64, 'dropout_rate': 0.25, 'epochs': 30, 'kernel_size': 3}
mean test accuracy +/- std = 0.7689 +/- 0.0036 with: {'batch_size': 64, 'dropout_rate': 0.25, 'epochs': 30, 'kernel_size': 5}
mean test accuracy +/- std = 0.7636 +/- 0.0023 with: {'batch_size': 64, 'dropout_rate': 0.25, 'epochs': 30, 'kernel_size': 7}
mean test accuracy +/- std = 0.7127 +/- 0.0012 with: {'batch_size': 64, 'dropout_rate': 0.3, 'epochs': 30, 'kernel_size': 3}
mean test accuracy +/- std = 0.7625 +/- 0.0018 with: {'batch_size': 64, 'dropout_rate': 0.3, 'epochs': 30, 'kernel_size': 5}
mean test accuracy +/- std = 0.7677 +/- 0.0038 with: {'batch_size': 64, 'dropout_rate': 0.3, 'epochs': 30, 'kernel_size': 7}
mean test accuracy +/- std = 0.6865 +/- 0.0077 with: {'batch_size': 64, 'dropout_rate': 0.35, 'epochs': 30, 'kernel_size': 3}
mean test accuracy +/- std = 0.7373 +/- 0.0093 with: {'batch_size': 64, 'dropout_rate': 0.35, 'epochs': 30, 'kernel_size': 5}
mean test accuracy +/- std = 0.7626 +/- 0.0059 with: {'batch_size': 64, 'dropout_rate': 0.35, 'epochs': 30, 'kernel_size': 7}
mean test accuracy +/- std = 0.7378 +/- 0.0032 with: {'batch_size': 128, 'dropout_rate': 0.2, 'epochs': 30, 'kernel_size': 3}
mean test accuracy +/- std = 0.7701 +/- 0.0056 with: {'batch_size': 128, 'dropout_rate': 0.2, 'epochs': 30, 'kernel_size': 5}
mean test accuracy +/- std = 0.7597 +/- 0.0025 with: {'batch_size': 128, 'dropout_rate': 0.2, 'epochs': 30, 'kernel_size': 7}
mean test accuracy +/- std = 0.7061 +/- 0.0090 with: {'batch_size': 128, 'dropout_rate': 0.25, 'epochs': 30, 'kernel_size': 3}
mean test accuracy +/- std = 0.7544 +/- 0.0058 with: {'batch_size': 128, 'dropout_rate': 0.25, 'epochs': 30, 'kernel_size': 5}
mean test accuracy +/- std = 0.7617 +/- 0.0025 with: {'batch_size': 128, 'dropout_rate': 0.25, 'epochs': 30, 'kernel_size': 7}
mean test accuracy +/- std = 0.6794 +/- 0.0053 with: {'batch_size': 128, 'dropout_rate': 0.3, 'epochs': 30, 'kernel_size': 3}
mean test accuracy +/- std = 0.7261 +/- 0.0066 with: {'batch_size': 128, 'dropout_rate': 0.3, 'epochs': 30, 'kernel_size': 5}
mean test accuracy +/- std = 0.7596 +/- 0.0012 with: {'batch_size': 128, 'dropout_rate': 0.3, 'epochs': 30, 'kernel_size': 7}
mean test accuracy +/- std = 0.6521 +/- 0.0059 with: {'batch_size': 128, 'dropout_rate': 0.35, 'epochs': 30, 'kernel_size': 3}
mean test accuracy +/- std = 0.6997 +/- 0.0024 with: {'batch_size': 128, 'dropout_rate': 0.35, 'epochs': 30, 'kernel_size': 5}
mean test accuracy +/- std = 0.7321 +/- 0.0095 with: {'batch_size': 128, 'dropout_rate': 0.35, 'epochs': 30, 'kernel_size': 7}

Accuracy achieved by the best model
83.35%
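The headline accuracy quoted after each search is plausibly the refitted best estimator scored on the held-out test split; a sketch under that assumption:

# with refit=True (the default), GridSearchCV keeps the best configuration
# retrained on the full training data, so it can be scored directly
test_acc = grid_searcher_result.score(x_test, y_test)
print(f"Accuracy achieved by the best model: {test_acc * 100:.2f}%")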

Tuning the padding

In [ ]:
def create_model(padding='same', dropout_rate=0.2):
  model = models.Sequential()

  # 1st convolutional layer
  model.add(layers.Conv2D(64, kernel_size=5, strides=1, padding=padding, activation='relu', input_shape=(48, 48, 1)))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))    

  # 2nd convolutional layer
  model.add(layers.Conv2D(128, kernel_size=5, strides=1, padding=padding, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # 3rd convolutional layer
  model.add(layers.Conv2D(256, kernel_size=5, strides=1, padding=padding, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # 4th convolutional layer
  model.add(layers.Conv2D(256, kernel_size=5, strides=1, padding=padding, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  model.add(layers.Flatten())

  # 1st fully connected dense layer
  model.add(layers.Dense(256, activation='relu'))
  model.add(layers.BatchNormalization())
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # 2nd fully connected dense layer
  model.add(layers.Dense(256, activation='relu'))
  model.add(layers.BatchNormalization())
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # final dense layer with number of classes
  model.add(layers.Dense(7, activation='softmax'))

  # compile the model
  model.compile(optimizer=optimizers.Adam(0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
  return model
In [ ]:
# wrapping the model-building function in a KerasClassifier so it can be used with scikit-learn
model = KerasClassifier(build_fn=create_model, padding='same', dropout_rate=0.2, verbose=1)

# specifying the hyperparameters to be tuned during the grid search 
param_grid = {
    'padding': ['same', 'valid', 'causal'],
    'epochs': [30],
    'batch_size': [128],
}

# running the grid search and plotting the results
grid_searcher = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)
grid_searcher_result = grid_searcher.fit(x_train, y_train)
plot_grid_search_results(grid_searcher_result)
/usr/local/lib/python3.7/dist-packages/sklearn/model_selection/_validation.py:372: FitFailedWarning: 
6 fits failed out of a total of 9.
The score on these train-test partitions for these parameters will be set to nan.
If these failures are not expected, you can try to debug them by setting error_score='raise'.

Below are more details about the failures:
--------------------------------------------------------------------------------
1 fits failed with the following error:
Traceback (most recent call last):
  File "/usr/local/lib/python3.7/dist-packages/sklearn/model_selection/_validation.py", line 680, in _fit_and_score
    estimator.fit(X_train, y_train, **fit_params)
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 1453, in fit
    super().fit(X=X, y=y, sample_weight=sample_weight, **kwargs)
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 726, in fit
    X=X, y=y, sample_weight=sample_weight, warm_start=self.warm_start, **kwargs,
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 877, in _fit
    X, y = self._initialize(X, y)
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 814, in _initialize
    self.model_ = self._build_keras_model()
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 408, in _build_keras_model
    model = final_build_fn(**build_params)
  File "<ipython-input-112-d6a8404a65bf>", line 26, in create_model
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/training/tracking/base.py", line 629, in _method_wrapper
    result = method(self, *args, **kwargs)
  File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File "/usr/local/lib/python3.7/dist-packages/keras/layers/convolutional.py", line 305, in compute_output_shape
    f'One of the dimensions in the output is <= 0 '
ValueError: One of the dimensions in the output is <= 0 due to downsampling in conv2d_3. Consider increasing the input size. Received input shape [None, 2, 2, 256] which would produce output shape with a zero or negative value in a dimension.

--------------------------------------------------------------------------------
1 fits failed with the following error:
Traceback (most recent call last):
  File "/usr/local/lib/python3.7/dist-packages/sklearn/model_selection/_validation.py", line 680, in _fit_and_score
    estimator.fit(X_train, y_train, **fit_params)
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 1453, in fit
    super().fit(X=X, y=y, sample_weight=sample_weight, **kwargs)
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 726, in fit
    X=X, y=y, sample_weight=sample_weight, warm_start=self.warm_start, **kwargs,
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 877, in _fit
    X, y = self._initialize(X, y)
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 814, in _initialize
    self.model_ = self._build_keras_model()
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 408, in _build_keras_model
    model = final_build_fn(**build_params)
  File "<ipython-input-112-d6a8404a65bf>", line 26, in create_model
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/training/tracking/base.py", line 629, in _method_wrapper
    result = method(self, *args, **kwargs)
  File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File "/usr/local/lib/python3.7/dist-packages/keras/layers/convolutional.py", line 305, in compute_output_shape
    f'One of the dimensions in the output is <= 0 '
ValueError: One of the dimensions in the output is <= 0 due to downsampling in conv2d_7. Consider increasing the input size. Received input shape [None, 2, 2, 256] which would produce output shape with a zero or negative value in a dimension.

--------------------------------------------------------------------------------
1 fits failed with the following error:
Traceback (most recent call last):
  File "/usr/local/lib/python3.7/dist-packages/sklearn/model_selection/_validation.py", line 680, in _fit_and_score
    estimator.fit(X_train, y_train, **fit_params)
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 1453, in fit
    super().fit(X=X, y=y, sample_weight=sample_weight, **kwargs)
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 726, in fit
    X=X, y=y, sample_weight=sample_weight, warm_start=self.warm_start, **kwargs,
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 877, in _fit
    X, y = self._initialize(X, y)
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 814, in _initialize
    self.model_ = self._build_keras_model()
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 408, in _build_keras_model
    model = final_build_fn(**build_params)
  File "<ipython-input-112-d6a8404a65bf>", line 26, in create_model
  File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/training/tracking/base.py", line 629, in _method_wrapper
    result = method(self, *args, **kwargs)
  File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File "/usr/local/lib/python3.7/dist-packages/keras/layers/convolutional.py", line 305, in compute_output_shape
    f'One of the dimensions in the output is <= 0 '
ValueError: One of the dimensions in the output is <= 0 due to downsampling in conv2d_11. Consider increasing the input size. Received input shape [None, 2, 2, 256] which would produce output shape with a zero or negative value in a dimension.

--------------------------------------------------------------------------------
3 fits failed with the following error:
Traceback (most recent call last):
  File "/usr/local/lib/python3.7/dist-packages/sklearn/model_selection/_validation.py", line 680, in _fit_and_score
    estimator.fit(X_train, y_train, **fit_params)
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 1453, in fit
    super().fit(X=X, y=y, sample_weight=sample_weight, **kwargs)
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 726, in fit
    X=X, y=y, sample_weight=sample_weight, warm_start=self.warm_start, **kwargs,
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 877, in _fit
    X, y = self._initialize(X, y)
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 814, in _initialize
    self.model_ = self._build_keras_model()
  File "/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py", line 408, in _build_keras_model
    model = final_build_fn(**build_params)
  File "<ipython-input-112-d6a8404a65bf>", line 5, in create_model
  File "/usr/local/lib/python3.7/dist-packages/keras/layers/convolutional.py", line 691, in __init__
    **kwargs)
  File "/usr/local/lib/python3.7/dist-packages/keras/layers/convolutional.py", line 159, in __init__
    self._validate_init()
  File "/usr/local/lib/python3.7/dist-packages/keras/layers/convolutional.py", line 182, in _validate_init
    raise ValueError('Causal padding is only supported for `Conv1D`'
ValueError: Causal padding is only supported for `Conv1D`and `SeparableConv1D`.

  warnings.warn(some_fits_failed_message, FitFailedWarning)
/usr/local/lib/python3.7/dist-packages/sklearn/model_selection/_search.py:972: UserWarning: One or more of the test scores are non-finite: [0.76602515        nan        nan]
  category=UserWarning,
/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py:290: UserWarning: ``build_fn`` will be renamed to ``model`` in a future release, at which point use of ``build_fn`` will raise an Error instead.
  "``build_fn`` will be renamed to ``model`` in a future release,"
Epoch 1/30
354/354 [==============================] - 10s 24ms/step - loss: 2.1119 - accuracy: 0.2486
Epoch 2/30
354/354 [==============================] - 8s 24ms/step - loss: 1.7603 - accuracy: 0.3606
Epoch 3/30
354/354 [==============================] - 8s 24ms/step - loss: 1.5295 - accuracy: 0.4397
Epoch 4/30
354/354 [==============================] - 8s 24ms/step - loss: 1.3559 - accuracy: 0.4989
Epoch 5/30
354/354 [==============================] - 9s 24ms/step - loss: 1.2279 - accuracy: 0.5445
Epoch 6/30
354/354 [==============================] - 8s 24ms/step - loss: 1.1251 - accuracy: 0.5804
Epoch 7/30
354/354 [==============================] - 8s 24ms/step - loss: 1.0421 - accuracy: 0.6133
Epoch 8/30
354/354 [==============================] - 8s 24ms/step - loss: 0.9696 - accuracy: 0.6388
Epoch 9/30
354/354 [==============================] - 9s 24ms/step - loss: 0.9070 - accuracy: 0.6643
Epoch 10/30
354/354 [==============================] - 8s 24ms/step - loss: 0.8489 - accuracy: 0.6857
Epoch 11/30
354/354 [==============================] - 8s 24ms/step - loss: 0.7909 - accuracy: 0.7079
Epoch 12/30
354/354 [==============================] - 8s 24ms/step - loss: 0.7389 - accuracy: 0.7277
Epoch 13/30
354/354 [==============================] - 8s 24ms/step - loss: 0.6904 - accuracy: 0.7483
Epoch 14/30
354/354 [==============================] - 8s 24ms/step - loss: 0.6402 - accuracy: 0.7678
Epoch 15/30
354/354 [==============================] - 8s 24ms/step - loss: 0.5968 - accuracy: 0.7864
Epoch 16/30
354/354 [==============================] - 9s 24ms/step - loss: 0.5423 - accuracy: 0.8058
Epoch 17/30
354/354 [==============================] - 8s 24ms/step - loss: 0.5035 - accuracy: 0.8220
Epoch 18/30
354/354 [==============================] - 8s 24ms/step - loss: 0.4606 - accuracy: 0.8363
Epoch 19/30
354/354 [==============================] - 8s 24ms/step - loss: 0.4221 - accuracy: 0.8524
Epoch 20/30
354/354 [==============================] - 8s 24ms/step - loss: 0.3885 - accuracy: 0.8637
Epoch 21/30
354/354 [==============================] - 8s 24ms/step - loss: 0.3564 - accuracy: 0.8742
Epoch 22/30
354/354 [==============================] - 9s 24ms/step - loss: 0.3275 - accuracy: 0.8839
Epoch 23/30
354/354 [==============================] - 8s 24ms/step - loss: 0.3058 - accuracy: 0.8920
Epoch 24/30
354/354 [==============================] - 8s 24ms/step - loss: 0.2838 - accuracy: 0.8989
Epoch 25/30
354/354 [==============================] - 8s 24ms/step - loss: 0.2604 - accuracy: 0.9089
Epoch 26/30
354/354 [==============================] - 8s 24ms/step - loss: 0.2419 - accuracy: 0.9153
Epoch 27/30
354/354 [==============================] - 8s 24ms/step - loss: 0.2236 - accuracy: 0.9214
Epoch 28/30
354/354 [==============================] - 8s 24ms/step - loss: 0.2090 - accuracy: 0.9263
Epoch 29/30
354/354 [==============================] - 9s 24ms/step - loss: 0.1947 - accuracy: 0.9330
Epoch 30/30
354/354 [==============================] - 8s 24ms/step - loss: 0.1791 - accuracy: 0.9372

Best score = 0.77 using {'batch_size': 128, 'epochs': 30, 'padding': 'same'}

99/99 [==============================] - 1s 7ms/step
mean test accuracy +/- std = 0.7660 +/- 0.0025 with: {'batch_size': 128, 'epochs': 30, 'padding': 'same'}
mean test accuracy +/- std = nan +/- nan with: {'batch_size': 128, 'epochs': 30, 'padding': 'valid'}
mean test accuracy +/- std = nan +/- nan with: {'batch_size': 128, 'epochs': 30, 'padding': 'causal'}
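
The nan rows are expected rather than a tuning failure: 'causal' padding is only defined for 1-D convolutions, and with 'valid' padding the 48x48 input shrinks at every 5x5 convolution until the fourth convolutional layer receives a 2x2 feature map it cannot convolve, exactly as the tracebacks above report. A quick sketch of the arithmetic:

# feature-map side length through four (5x5 'valid' conv -> 2x2 pool) stages
size = 48
for stage in range(1, 5):
  conv = size - (5 - 1)  # a 'valid' 5x5 convolution shrinks n to n - 4
  if conv <= 0:
    print(f"stage {stage}: convolving {size}x{size} would give {conv}x{conv} -> error")
    break
  size = conv // 2       # 2x2 pooling halves (and floors) the spatial size
  print(f"stage {stage}: {size}x{size}")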

Accuracy achieved by the best model
83.95%

Tuning the optimiser and learning rate

In [ ]:
def create_model(optimizer=optimizers.Adam, learning_rate=0.0001, dropout_rate=0.2):
  model = models.Sequential()

  # 1st convolutional layer
  model.add(layers.Conv2D(64, kernel_size=3, strides=1, padding='same', activation='relu', input_shape=x_train.shape[1:]))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))    

  # 2nd convolutional layer
  model.add(layers.Conv2D(128, kernel_size=5, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # 3rd convolutional layer
  model.add(layers.Conv2D(256, kernel_size=3, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # 4th convolutional layer
  model.add(layers.Conv2D(256, kernel_size=3, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  model.add(layers.Flatten())

  # 1st fully connected dense layer
  model.add(layers.Dense(256, activation='relu'))
  model.add(layers.BatchNormalization())
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # 2nd fully connected dense layer
  model.add(layers.Dense(256, activation='relu'))
  model.add(layers.BatchNormalization())
  if dropout_rate != 0:
    model.add(layers.Dropout(dropout_rate))  

  # final dense layer with number of classes
  model.add(layers.Dense(7, activation='softmax'))

  # compile the model
  model.compile(optimizer=optimizer(learning_rate), loss='categorical_crossentropy', metrics=['accuracy'])
  return model
In [ ]:
# wrapping the model-building function in a KerasClassifier so it can be used with scikit-learn
model = KerasClassifier(build_fn=create_model, optimizer=optimizers.Adam, learning_rate=0.0001, dropout_rate=0.2, verbose=1)

# specifying the hyperparameters to be tuned during the grid search 
param_grid = {
    'optimizer': [optimizers.RMSprop, optimizers.Adagrad, optimizers.Adam, optimizers.Adamax, optimizers.Nadam],
    'learning_rate': [0.0001, 0.00001, 0.0005, 0.00005],
    'epochs': [30],
    'batch_size': [64, 128],
}

# running the grid search and plotting the results
grid_searcher = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)
grid_searcher_result = grid_searcher.fit(x_train, y_train)
plot_grid_search_results(grid_searcher_result)
/usr/local/lib/python3.7/dist-packages/joblib/externals/loky/process_executor.py:705: UserWarning: A worker stopped while some jobs were given to the executor. This can be caused by a too short worker timeout or by a memory leak.
  "timeout or by a memory leak.", UserWarning
/usr/local/lib/python3.7/dist-packages/scikeras/wrappers.py:290: UserWarning: ``build_fn`` will be renamed to ``model`` in a future release, at which point use of ``build_fn`` will raise an Error instead.
  "``build_fn`` will be renamed to ``model`` in a future release,"
Epoch 1/30
708/708 [==============================] - 16s 18ms/step - loss: 2.0423 - accuracy: 0.2424
Epoch 2/30
708/708 [==============================] - 12s 18ms/step - loss: 1.5573 - accuracy: 0.4130
Epoch 3/30
708/708 [==============================] - 12s 18ms/step - loss: 1.2539 - accuracy: 0.5278
Epoch 4/30
708/708 [==============================] - 12s 18ms/step - loss: 1.0888 - accuracy: 0.5898
Epoch 5/30
708/708 [==============================] - 13s 18ms/step - loss: 0.9752 - accuracy: 0.6335
Epoch 6/30
708/708 [==============================] - 12s 18ms/step - loss: 0.9010 - accuracy: 0.6619
Epoch 7/30
708/708 [==============================] - 12s 18ms/step - loss: 0.8291 - accuracy: 0.6890
Epoch 8/30
708/708 [==============================] - 12s 18ms/step - loss: 0.7633 - accuracy: 0.7181
Epoch 9/30
708/708 [==============================] - 12s 18ms/step - loss: 0.7059 - accuracy: 0.7399
Epoch 10/30
708/708 [==============================] - 12s 18ms/step - loss: 0.6468 - accuracy: 0.7653
Epoch 11/30
708/708 [==============================] - 12s 18ms/step - loss: 0.6094 - accuracy: 0.7775
Epoch 12/30
708/708 [==============================] - 12s 18ms/step - loss: 0.5601 - accuracy: 0.7981
Epoch 13/30
708/708 [==============================] - 12s 18ms/step - loss: 0.5241 - accuracy: 0.8126
Epoch 14/30
708/708 [==============================] - 12s 18ms/step - loss: 0.4873 - accuracy: 0.8249
Epoch 15/30
708/708 [==============================] - 12s 18ms/step - loss: 0.4613 - accuracy: 0.8348
Epoch 16/30
708/708 [==============================] - 12s 18ms/step - loss: 0.4387 - accuracy: 0.8428
Epoch 17/30
708/708 [==============================] - 12s 18ms/step - loss: 0.4085 - accuracy: 0.8540
Epoch 18/30
708/708 [==============================] - 12s 18ms/step - loss: 0.3905 - accuracy: 0.8606
Epoch 19/30
708/708 [==============================] - 12s 18ms/step - loss: 0.3707 - accuracy: 0.8684
Epoch 20/30
708/708 [==============================] - 12s 18ms/step - loss: 0.3581 - accuracy: 0.8721
Epoch 21/30
708/708 [==============================] - 12s 18ms/step - loss: 0.3389 - accuracy: 0.8792
Epoch 22/30
708/708 [==============================] - 12s 18ms/step - loss: 0.3244 - accuracy: 0.8856
Epoch 23/30
708/708 [==============================] - 12s 18ms/step - loss: 0.3148 - accuracy: 0.8875
Epoch 24/30
708/708 [==============================] - 12s 18ms/step - loss: 0.2960 - accuracy: 0.8951
Epoch 25/30
708/708 [==============================] - 12s 18ms/step - loss: 0.2884 - accuracy: 0.8982
Epoch 26/30
708/708 [==============================] - 12s 18ms/step - loss: 0.2817 - accuracy: 0.9020
Epoch 27/30
708/708 [==============================] - 12s 18ms/step - loss: 0.2704 - accuracy: 0.9036
Epoch 28/30
708/708 [==============================] - 12s 18ms/step - loss: 0.2582 - accuracy: 0.9084
Epoch 29/30
708/708 [==============================] - 12s 18ms/step - loss: 0.2531 - accuracy: 0.9097
Epoch 30/30
708/708 [==============================] - 13s 18ms/step - loss: 0.2484 - accuracy: 0.9133

Best score = 0.79 using {'batch_size': 64, 'epochs': 30, 'learning_rate': 0.0005, 'optimizer': <class 'keras.optimizer_v2.adamax.Adamax'>}

197/197 [==============================] - 2s 6ms/step
mean test accuracy +/- std = 0.7445 +/- 0.0037 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 0.0001, 'optimizer': <class 'keras.optimizer_v2.rmsprop.RMSprop'>}
mean test accuracy +/- std = 0.7432 +/- 0.0043 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 0.0001, 'optimizer': <class 'keras.optimizer_v2.adagrad.Adagrad'>}
mean test accuracy +/- std = 0.7471 +/- 0.0035 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 0.0001, 'optimizer': <class 'keras.optimizer_v2.adam.Adam'>}
mean test accuracy +/- std = 0.7421 +/- 0.0042 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 0.0001, 'optimizer': <class 'keras.optimizer_v2.adamax.Adamax'>}
mean test accuracy +/- std = 0.7468 +/- 0.0011 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 0.0001, 'optimizer': <class 'keras.optimizer_v2.nadam.Nadam'>}
mean test accuracy +/- std = 0.4606 +/- 0.0042 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 1e-05, 'optimizer': <class 'keras.optimizer_v2.rmsprop.RMSprop'>}
mean test accuracy +/- std = 0.4621 +/- 0.0115 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 1e-05, 'optimizer': <class 'keras.optimizer_v2.adagrad.Adagrad'>}
mean test accuracy +/- std = 0.4690 +/- 0.0029 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 1e-05, 'optimizer': <class 'keras.optimizer_v2.adam.Adam'>}
mean test accuracy +/- std = 0.4538 +/- 0.0054 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 1e-05, 'optimizer': <class 'keras.optimizer_v2.adamax.Adamax'>}
mean test accuracy +/- std = 0.4599 +/- 0.0187 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 1e-05, 'optimizer': <class 'keras.optimizer_v2.nadam.Nadam'>}
mean test accuracy +/- std = 0.7816 +/- 0.0043 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 0.0005, 'optimizer': <class 'keras.optimizer_v2.rmsprop.RMSprop'>}
mean test accuracy +/- std = 0.7754 +/- 0.0072 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 0.0005, 'optimizer': <class 'keras.optimizer_v2.adagrad.Adagrad'>}
mean test accuracy +/- std = 0.7815 +/- 0.0036 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 0.0005, 'optimizer': <class 'keras.optimizer_v2.adam.Adam'>}
mean test accuracy +/- std = 0.7872 +/- 0.0066 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 0.0005, 'optimizer': <class 'keras.optimizer_v2.adamax.Adamax'>}
mean test accuracy +/- std = 0.7795 +/- 0.0035 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 0.0005, 'optimizer': <class 'keras.optimizer_v2.nadam.Nadam'>}
mean test accuracy +/- std = 0.6685 +/- 0.0036 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 5e-05, 'optimizer': <class 'keras.optimizer_v2.rmsprop.RMSprop'>}
mean test accuracy +/- std = 0.6718 +/- 0.0032 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 5e-05, 'optimizer': <class 'keras.optimizer_v2.adagrad.Adagrad'>}
mean test accuracy +/- std = 0.6753 +/- 0.0063 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 5e-05, 'optimizer': <class 'keras.optimizer_v2.adam.Adam'>}
mean test accuracy +/- std = 0.6742 +/- 0.0034 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 5e-05, 'optimizer': <class 'keras.optimizer_v2.adamax.Adamax'>}
mean test accuracy +/- std = 0.6725 +/- 0.0072 with: {'batch_size': 64, 'epochs': 30, 'learning_rate': 5e-05, 'optimizer': <class 'keras.optimizer_v2.nadam.Nadam'>}
mean test accuracy +/- std = 0.7088 +/- 0.0038 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 0.0001, 'optimizer': <class 'keras.optimizer_v2.rmsprop.RMSprop'>}
mean test accuracy +/- std = 0.7078 +/- 0.0029 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 0.0001, 'optimizer': <class 'keras.optimizer_v2.adagrad.Adagrad'>}
mean test accuracy +/- std = 0.7112 +/- 0.0039 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 0.0001, 'optimizer': <class 'keras.optimizer_v2.adam.Adam'>}
mean test accuracy +/- std = 0.7156 +/- 0.0050 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 0.0001, 'optimizer': <class 'keras.optimizer_v2.adamax.Adamax'>}
mean test accuracy +/- std = 0.7094 +/- 0.0056 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 0.0001, 'optimizer': <class 'keras.optimizer_v2.nadam.Nadam'>}
mean test accuracy +/- std = 0.4130 +/- 0.0021 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 1e-05, 'optimizer': <class 'keras.optimizer_v2.rmsprop.RMSprop'>}
mean test accuracy +/- std = 0.4246 +/- 0.0048 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 1e-05, 'optimizer': <class 'keras.optimizer_v2.adagrad.Adagrad'>}
mean test accuracy +/- std = 0.4200 +/- 0.0088 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 1e-05, 'optimizer': <class 'keras.optimizer_v2.adam.Adam'>}
mean test accuracy +/- std = 0.4194 +/- 0.0033 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 1e-05, 'optimizer': <class 'keras.optimizer_v2.adamax.Adamax'>}
mean test accuracy +/- std = 0.4116 +/- 0.0059 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 1e-05, 'optimizer': <class 'keras.optimizer_v2.nadam.Nadam'>}
mean test accuracy +/- std = 0.7807 +/- 0.0059 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 0.0005, 'optimizer': <class 'keras.optimizer_v2.rmsprop.RMSprop'>}
mean test accuracy +/- std = 0.7802 +/- 0.0050 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 0.0005, 'optimizer': <class 'keras.optimizer_v2.adagrad.Adagrad'>}
mean test accuracy +/- std = 0.7817 +/- 0.0054 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 0.0005, 'optimizer': <class 'keras.optimizer_v2.adam.Adam'>}
mean test accuracy +/- std = 0.7687 +/- 0.0105 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 0.0005, 'optimizer': <class 'keras.optimizer_v2.adamax.Adamax'>}
mean test accuracy +/- std = 0.7798 +/- 0.0033 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 0.0005, 'optimizer': <class 'keras.optimizer_v2.nadam.Nadam'>}
mean test accuracy +/- std = 0.6317 +/- 0.0044 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 5e-05, 'optimizer': <class 'keras.optimizer_v2.rmsprop.RMSprop'>}
mean test accuracy +/- std = 0.6318 +/- 0.0037 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 5e-05, 'optimizer': <class 'keras.optimizer_v2.adagrad.Adagrad'>}
mean test accuracy +/- std = 0.6313 +/- 0.0013 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 5e-05, 'optimizer': <class 'keras.optimizer_v2.adam.Adam'>}
mean test accuracy +/- std = 0.6347 +/- 0.0064 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 5e-05, 'optimizer': <class 'keras.optimizer_v2.adamax.Adamax'>}
mean test accuracy +/- std = 0.6310 +/- 0.0105 with: {'batch_size': 128, 'epochs': 30, 'learning_rate': 5e-05, 'optimizer': <class 'keras.optimizer_v2.nadam.Nadam'>}

Accuracy achieved by the best model
83.31%
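With the winning combination identified (Adamax at a learning rate of 0.0005, batch size 64), the final model can be rebuilt outside the grid search. A sketch using the create_model factory above; training for the same 30 epochs is an assumption carried over from the grid:

# rebuild and train the best configuration found by the search
best = create_model(optimizer=optimizers.Adamax, learning_rate=0.0005, dropout_rate=0.2)
best.fit(x_train, y_train, epochs=30, batch_size=64, verbose=1)
loss, acc = best.evaluate(x_test, y_test)
print(f"test accuracy: {acc * 100:.2f}%")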

Combining Models

ReLU Activation with Average Pooling

In [ ]:
def combined_model_1():
  model = models.Sequential()

  # 1st convolutional layer
  model.add(layers.Conv2D(64, kernel_size=3, strides=1, padding='same', activation='relu', input_shape=x_train.shape[1:]))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  model.add(layers.Dropout(0.25))

  # 2nd convolutional layer
  model.add(layers.Conv2D(128, kernel_size=5, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  model.add(layers.Dropout(0.25))

  # 3rd convolutional layer
  model.add(layers.Conv2D(512, kernel_size=3, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  model.add(layers.Dropout(0.25))

  # 4th convolutional layer
  model.add(layers.Conv2D(512, kernel_size=3, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  model.add(layers.Dropout(0.25))
  model.add(layers.Flatten())

  # 1st fully connected dense layer
  model.add(layers.Dense(256, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.Dropout(0.25))

  # 2nd fully connected dense layer
  model.add(layers.Dense(512, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.Dropout(0.25))

  # final dense layer with number of classes
  model.add(layers.Dense(7, activation='softmax'))

  # compile the model
  model.compile(optimizer=optimizers.Adam(0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
  return model
In [ ]:
model = combined_model_1()
plot_model(model, to_file="model.png", show_shapes=True, show_layer_names=True)
Out[ ]:
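train_model below is a helper defined earlier in the notebook. Its body is not repeated here, but the logs that follow (checkpointing on val_loss to best-model.h5, and learning-rate drops from 1e-4 to 3.16e-5, i.e. a factor of sqrt(0.1)) are consistent with a sketch along these lines; the patience value and the x_val/y_val validation split are assumptions:

def train_model(model, epochs, batch_size):
  # plausible reconstruction: save the best weights by validation loss and
  # decay the learning rate by sqrt(0.1) when validation loss plateaus
  callbacks = [
      ModelCheckpoint('best-model.h5', monitor='val_loss', save_best_only=True, verbose=1),
      ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.1), patience=3, verbose=1),
  ]
  return model.fit(x_train, y_train, validation_data=(x_val, y_val),
                   epochs=epochs, batch_size=batch_size, callbacks=callbacks)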
In [ ]:
history5 = train_model(model, 30, 64)
model.evaluate(x_test, y_test)
Epoch 1/30
707/708 [============================>.] - ETA: 0s - loss: 2.1996 - accuracy: 0.2309
Epoch 1: val_loss improved from inf to 1.81243, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 2.1993 - accuracy: 0.2310 - val_loss: 1.8124 - val_accuracy: 0.3270 - lr: 1.0000e-04
Epoch 2/30
708/708 [==============================] - ETA: 0s - loss: 1.8299 - accuracy: 0.3392
Epoch 2: val_loss improved from 1.81243 to 1.61387, saving model to best-model.h5
708/708 [==============================] - 18s 25ms/step - loss: 1.8299 - accuracy: 0.3392 - val_loss: 1.6139 - val_accuracy: 0.4120 - lr: 1.0000e-04
Epoch 3/30
708/708 [==============================] - ETA: 0s - loss: 1.5866 - accuracy: 0.4211
Epoch 3: val_loss improved from 1.61387 to 1.30976, saving model to best-model.h5
708/708 [==============================] - 18s 25ms/step - loss: 1.5866 - accuracy: 0.4211 - val_loss: 1.3098 - val_accuracy: 0.5028 - lr: 1.0000e-04
Epoch 4/30
707/708 [============================>.] - ETA: 0s - loss: 1.4040 - accuracy: 0.4848
Epoch 4: val_loss improved from 1.30976 to 1.17384, saving model to best-model.h5
708/708 [==============================] - 18s 25ms/step - loss: 1.4040 - accuracy: 0.4849 - val_loss: 1.1738 - val_accuracy: 0.5602 - lr: 1.0000e-04
Epoch 5/30
708/708 [==============================] - ETA: 0s - loss: 1.2695 - accuracy: 0.5281
Epoch 5: val_loss improved from 1.17384 to 1.07224, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 1.2695 - accuracy: 0.5281 - val_loss: 1.0722 - val_accuracy: 0.5961 - lr: 1.0000e-04
Epoch 6/30
708/708 [==============================] - ETA: 0s - loss: 1.1556 - accuracy: 0.5696
Epoch 6: val_loss improved from 1.07224 to 1.06693, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 1.1556 - accuracy: 0.5696 - val_loss: 1.0669 - val_accuracy: 0.6033 - lr: 1.0000e-04
Epoch 7/30
706/708 [============================>.] - ETA: 0s - loss: 1.0677 - accuracy: 0.6018
Epoch 7: val_loss improved from 1.06693 to 0.92752, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 1.0677 - accuracy: 0.6017 - val_loss: 0.9275 - val_accuracy: 0.6563 - lr: 1.0000e-04
Epoch 8/30
707/708 [============================>.] - ETA: 0s - loss: 0.9901 - accuracy: 0.6311
Epoch 8: val_loss improved from 0.92752 to 0.89566, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.9901 - accuracy: 0.6311 - val_loss: 0.8957 - val_accuracy: 0.6677 - lr: 1.0000e-04
Epoch 9/30
706/708 [============================>.] - ETA: 0s - loss: 0.9288 - accuracy: 0.6550
Epoch 9: val_loss improved from 0.89566 to 0.89384, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.9290 - accuracy: 0.6550 - val_loss: 0.8938 - val_accuracy: 0.6619 - lr: 1.0000e-04
Epoch 10/30
706/708 [============================>.] - ETA: 0s - loss: 0.8630 - accuracy: 0.6804
Epoch 10: val_loss improved from 0.89384 to 0.81526, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.8633 - accuracy: 0.6803 - val_loss: 0.8153 - val_accuracy: 0.6990 - lr: 1.0000e-04
Epoch 11/30
707/708 [============================>.] - ETA: 0s - loss: 0.8123 - accuracy: 0.7023
Epoch 11: val_loss improved from 0.81526 to 0.80827, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.8121 - accuracy: 0.7022 - val_loss: 0.8083 - val_accuracy: 0.6971 - lr: 1.0000e-04
Epoch 12/30
706/708 [============================>.] - ETA: 0s - loss: 0.7586 - accuracy: 0.7189
Epoch 12: val_loss improved from 0.80827 to 0.76015, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.7582 - accuracy: 0.7190 - val_loss: 0.7601 - val_accuracy: 0.7187 - lr: 1.0000e-04
Epoch 13/30
707/708 [============================>.] - ETA: 0s - loss: 0.7097 - accuracy: 0.7393
Epoch 13: val_loss improved from 0.76015 to 0.71031, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.7096 - accuracy: 0.7393 - val_loss: 0.7103 - val_accuracy: 0.7429 - lr: 1.0000e-04
Epoch 14/30
706/708 [============================>.] - ETA: 0s - loss: 0.6573 - accuracy: 0.7593
Epoch 14: val_loss improved from 0.71031 to 0.70321, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.6574 - accuracy: 0.7593 - val_loss: 0.7032 - val_accuracy: 0.7535 - lr: 1.0000e-04
Epoch 15/30
708/708 [==============================] - ETA: 0s - loss: 0.6038 - accuracy: 0.7809
Epoch 15: val_loss improved from 0.70321 to 0.66662, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.6038 - accuracy: 0.7809 - val_loss: 0.6666 - val_accuracy: 0.7676 - lr: 1.0000e-04
Epoch 16/30
706/708 [============================>.] - ETA: 0s - loss: 0.5641 - accuracy: 0.7966
Epoch 16: val_loss improved from 0.66662 to 0.64667, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.5646 - accuracy: 0.7964 - val_loss: 0.6467 - val_accuracy: 0.7767 - lr: 1.0000e-04
Epoch 17/30
707/708 [============================>.] - ETA: 0s - loss: 0.5217 - accuracy: 0.8131
Epoch 17: val_loss improved from 0.64667 to 0.62553, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.5215 - accuracy: 0.8132 - val_loss: 0.6255 - val_accuracy: 0.7841 - lr: 1.0000e-04
Epoch 18/30
707/708 [============================>.] - ETA: 0s - loss: 0.4774 - accuracy: 0.8277
Epoch 18: val_loss improved from 0.62553 to 0.61250, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.4773 - accuracy: 0.8277 - val_loss: 0.6125 - val_accuracy: 0.7960 - lr: 1.0000e-04
Epoch 19/30
707/708 [============================>.] - ETA: 0s - loss: 0.4496 - accuracy: 0.8390
Epoch 19: val_loss improved from 0.61250 to 0.60485, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.4499 - accuracy: 0.8388 - val_loss: 0.6049 - val_accuracy: 0.8049 - lr: 1.0000e-04
Epoch 20/30
706/708 [============================>.] - ETA: 0s - loss: 0.4145 - accuracy: 0.8523
Epoch 20: val_loss improved from 0.60485 to 0.59340, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.4142 - accuracy: 0.8524 - val_loss: 0.5934 - val_accuracy: 0.8157 - lr: 1.0000e-04
Epoch 21/30
708/708 [==============================] - ETA: 0s - loss: 0.3832 - accuracy: 0.8639
Epoch 21: val_loss did not improve from 0.59340
708/708 [==============================] - 18s 26ms/step - loss: 0.3832 - accuracy: 0.8639 - val_loss: 0.6258 - val_accuracy: 0.8103 - lr: 1.0000e-04
Epoch 22/30
706/708 [============================>.] - ETA: 0s - loss: 0.3568 - accuracy: 0.8748
Epoch 22: val_loss improved from 0.59340 to 0.58912, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.3569 - accuracy: 0.8747 - val_loss: 0.5891 - val_accuracy: 0.8246 - lr: 1.0000e-04
Epoch 23/30
707/708 [============================>.] - ETA: 0s - loss: 0.3309 - accuracy: 0.8821
Epoch 23: val_loss did not improve from 0.58912
708/708 [==============================] - 18s 26ms/step - loss: 0.3310 - accuracy: 0.8821 - val_loss: 0.5921 - val_accuracy: 0.8268 - lr: 1.0000e-04
Epoch 24/30
708/708 [==============================] - ETA: 0s - loss: 0.3063 - accuracy: 0.8930
Epoch 24: val_loss did not improve from 0.58912
708/708 [==============================] - 18s 26ms/step - loss: 0.3063 - accuracy: 0.8930 - val_loss: 0.5935 - val_accuracy: 0.8302 - lr: 1.0000e-04
Epoch 25/30
707/708 [============================>.] - ETA: 0s - loss: 0.2837 - accuracy: 0.8995
Epoch 25: val_loss did not improve from 0.58912

Epoch 25: ReduceLROnPlateau reducing learning rate to 3.1622775802825264e-05.
708/708 [==============================] - 18s 26ms/step - loss: 0.2836 - accuracy: 0.8995 - val_loss: 0.6047 - val_accuracy: 0.8232 - lr: 1.0000e-04
Epoch 26/30
706/708 [============================>.] - ETA: 0s - loss: 0.2312 - accuracy: 0.9183
Epoch 26: val_loss improved from 0.58912 to 0.55925, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.2311 - accuracy: 0.9184 - val_loss: 0.5592 - val_accuracy: 0.8399 - lr: 3.1623e-05
Epoch 27/30
708/708 [==============================] - ETA: 0s - loss: 0.2059 - accuracy: 0.9280
Epoch 27: val_loss did not improve from 0.55925
708/708 [==============================] - 18s 26ms/step - loss: 0.2059 - accuracy: 0.9280 - val_loss: 0.5644 - val_accuracy: 0.8403 - lr: 3.1623e-05
Epoch 28/30
707/708 [============================>.] - ETA: 0s - loss: 0.1920 - accuracy: 0.9313
Epoch 28: val_loss did not improve from 0.55925
708/708 [==============================] - 18s 26ms/step - loss: 0.1919 - accuracy: 0.9313 - val_loss: 0.5725 - val_accuracy: 0.8445 - lr: 3.1623e-05
Epoch 29/30
706/708 [============================>.] - ETA: 0s - loss: 0.1794 - accuracy: 0.9375
Epoch 29: val_loss did not improve from 0.55925

Epoch 29: ReduceLROnPlateau reducing learning rate to 9.999999259090306e-06.
708/708 [==============================] - 18s 26ms/step - loss: 0.1794 - accuracy: 0.9375 - val_loss: 0.5857 - val_accuracy: 0.8413 - lr: 3.1623e-05
Epoch 30/30
707/708 [============================>.] - ETA: 0s - loss: 0.1643 - accuracy: 0.9437
Epoch 30: val_loss did not improve from 0.55925
708/708 [==============================] - 18s 26ms/step - loss: 0.1645 - accuracy: 0.9437 - val_loss: 0.5789 - val_accuracy: 0.8449 - lr: 1.0000e-05
394/394 [==============================] - 2s 6ms/step - loss: 0.6089 - accuracy: 0.8439
Out[ ]:
[0.6088735461235046, 0.8438617587089539]
Combined Model 1 Results
In [ ]:
plot_model_history(history5, name="Combined Model 1")
Min validation loss: 0.5592455863952637 
Max validation loss: 1.812427282333374 
Min validation acc: 0.32697656750679016 
Max validation acc: 0.844855010509491
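The min/max figures printed by plot_model_history can be read straight off the Keras History object; a minimal sketch of that part (the helper presumably also draws the loss and accuracy curves):

# summarise the validation curves stored in a History object
def summarise_history(history):
  print('Min validation loss:', min(history.history['val_loss']))
  print('Max validation loss:', max(history.history['val_loss']))
  print('Min validation acc:', min(history.history['val_accuracy']))
  print('Max validation acc:', max(history.history['val_accuracy']))

summarise_history(history5)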
Model 2 vs Combined Model 1
In [ ]:
compare_model_history(history2, history5, name="Model 2 vs Combined Model 1", key1="Model 2", key2="Combined Model 1")
Model 2 validation loss: 0.5889896154403687 
Combined Model 1 validation loss: 0.5592455863952637

Model 2 validation accuracy: 0.8458482027053833 
Combined Model 1 validation accuracy: 0.844855010509491

Kernel Size of 5 with Dropout Regularisation of 0.2

In [ ]:
def combined_model_2():
  model = models.Sequential()

  # 1st convolutional layer
  model.add(layers.Conv2D(64, kernel_size=5, strides=1, padding='same', activation='relu', input_shape=x_train.shape[1:]))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.2))

  # 2nd convolutional layer
  model.add(layers.Conv2D(128, kernel_size=5, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.2))

  # 3rd convolutional layer
  model.add(layers.Conv2D(512, kernel_size=5, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.2))

  # 4th convolutional layer
  model.add(layers.Conv2D(512, kernel_size=5, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.2))
  model.add(layers.Flatten())

  # 1st fully connected dense layer
  model.add(layers.Dense(256, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.Dropout(0.2))

  # 2nd fully connected dense layer
  model.add(layers.Dense(512, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.Dropout(0.2))

  # final dense layer with number of classes
  model.add(layers.Dense(7, activation='softmax'))

  # compile the model
  model.compile(optimizer=optimizers.Adam(0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
  return model
In [ ]:
model = combined_model_2()
plot_model(model, to_file="model.png", show_shapes=True, show_layer_names=True)
Out[ ]:
In [ ]:
history6 = train_model(model, 30, 64)
model.evaluate(x_test, y_test)
Epoch 1/30
707/708 [============================>.] - ETA: 0s - loss: 2.0732 - accuracy: 0.2627
Epoch 1: val_loss improved from inf to 2.02089, saving model to best-model.h5
708/708 [==============================] - 32s 43ms/step - loss: 2.0727 - accuracy: 0.2628 - val_loss: 2.0209 - val_accuracy: 0.2960 - lr: 1.0000e-04
Epoch 2/30
707/708 [============================>.] - ETA: 0s - loss: 1.6464 - accuracy: 0.4002
Epoch 2: val_loss improved from 2.02089 to 1.44083, saving model to best-model.h5
708/708 [==============================] - 30s 42ms/step - loss: 1.6462 - accuracy: 0.4004 - val_loss: 1.4408 - val_accuracy: 0.4688 - lr: 1.0000e-04
Epoch 3/30
707/708 [============================>.] - ETA: 0s - loss: 1.3828 - accuracy: 0.4893
Epoch 3: val_loss improved from 1.44083 to 1.23598, saving model to best-model.h5
708/708 [==============================] - 30s 42ms/step - loss: 1.3827 - accuracy: 0.4894 - val_loss: 1.2360 - val_accuracy: 0.5348 - lr: 1.0000e-04
Epoch 4/30
707/708 [============================>.] - ETA: 0s - loss: 1.1872 - accuracy: 0.5614
Epoch 4: val_loss improved from 1.23598 to 1.04890, saving model to best-model.h5
708/708 [==============================] - 30s 42ms/step - loss: 1.1873 - accuracy: 0.5613 - val_loss: 1.0489 - val_accuracy: 0.6075 - lr: 1.0000e-04
Epoch 5/30
707/708 [============================>.] - ETA: 0s - loss: 1.0364 - accuracy: 0.6131
Epoch 5: val_loss improved from 1.04890 to 0.93198, saving model to best-model.h5
708/708 [==============================] - 30s 42ms/step - loss: 1.0364 - accuracy: 0.6130 - val_loss: 0.9320 - val_accuracy: 0.6589 - lr: 1.0000e-04
Epoch 6/30
707/708 [============================>.] - ETA: 0s - loss: 0.8909 - accuracy: 0.6699
Epoch 6: val_loss improved from 0.93198 to 0.85500, saving model to best-model.h5
708/708 [==============================] - 30s 42ms/step - loss: 0.8907 - accuracy: 0.6700 - val_loss: 0.8550 - val_accuracy: 0.6838 - lr: 1.0000e-04
Epoch 7/30
707/708 [============================>.] - ETA: 0s - loss: 0.7717 - accuracy: 0.7169
Epoch 7: val_loss improved from 0.85500 to 0.82079, saving model to best-model.h5
708/708 [==============================] - 30s 42ms/step - loss: 0.7715 - accuracy: 0.7170 - val_loss: 0.8208 - val_accuracy: 0.7066 - lr: 1.0000e-04
Epoch 8/30
707/708 [============================>.] - ETA: 0s - loss: 0.6481 - accuracy: 0.7652
Epoch 8: val_loss improved from 0.82079 to 0.71901, saving model to best-model.h5
708/708 [==============================] - 30s 42ms/step - loss: 0.6482 - accuracy: 0.7652 - val_loss: 0.7190 - val_accuracy: 0.7513 - lr: 1.0000e-04
Epoch 9/30
707/708 [============================>.] - ETA: 0s - loss: 0.5285 - accuracy: 0.8127
Epoch 9: val_loss improved from 0.71901 to 0.67376, saving model to best-model.h5
708/708 [==============================] - 30s 43ms/step - loss: 0.5284 - accuracy: 0.8127 - val_loss: 0.6738 - val_accuracy: 0.7813 - lr: 1.0000e-04
Epoch 10/30
707/708 [============================>.] - ETA: 0s - loss: 0.4246 - accuracy: 0.8510
Epoch 10: val_loss improved from 0.67376 to 0.62551, saving model to best-model.h5
708/708 [==============================] - 30s 43ms/step - loss: 0.4246 - accuracy: 0.8511 - val_loss: 0.6255 - val_accuracy: 0.8043 - lr: 1.0000e-04
Epoch 11/30
707/708 [============================>.] - ETA: 0s - loss: 0.3390 - accuracy: 0.8817
Epoch 11: val_loss did not improve from 0.62551
708/708 [==============================] - 30s 42ms/step - loss: 0.3390 - accuracy: 0.8818 - val_loss: 0.6462 - val_accuracy: 0.8053 - lr: 1.0000e-04
Epoch 12/30
707/708 [============================>.] - ETA: 0s - loss: 0.2717 - accuracy: 0.9057
Epoch 12: val_loss did not improve from 0.62551
708/708 [==============================] - 30s 42ms/step - loss: 0.2719 - accuracy: 0.9056 - val_loss: 0.6768 - val_accuracy: 0.8111 - lr: 1.0000e-04
Epoch 13/30
707/708 [============================>.] - ETA: 0s - loss: 0.2352 - accuracy: 0.9179
Epoch 13: val_loss did not improve from 0.62551

Epoch 13: ReduceLROnPlateau reducing learning rate to 3.1622775802825264e-05.
708/708 [==============================] - 30s 42ms/step - loss: 0.2355 - accuracy: 0.9178 - val_loss: 0.6850 - val_accuracy: 0.8149 - lr: 1.0000e-04
Epoch 14/30
707/708 [============================>.] - ETA: 0s - loss: 0.1432 - accuracy: 0.9522
Epoch 14: val_loss improved from 0.62551 to 0.62316, saving model to best-model.h5
708/708 [==============================] - 30s 43ms/step - loss: 0.1432 - accuracy: 0.9522 - val_loss: 0.6232 - val_accuracy: 0.8351 - lr: 3.1623e-05
Epoch 15/30
707/708 [============================>.] - ETA: 0s - loss: 0.1054 - accuracy: 0.9650
Epoch 15: val_loss did not improve from 0.62316
708/708 [==============================] - 30s 42ms/step - loss: 0.1054 - accuracy: 0.9649 - val_loss: 0.6567 - val_accuracy: 0.8319 - lr: 3.1623e-05
Epoch 16/30
707/708 [============================>.] - ETA: 0s - loss: 0.0859 - accuracy: 0.9729
Epoch 16: val_loss did not improve from 0.62316
708/708 [==============================] - 30s 42ms/step - loss: 0.0860 - accuracy: 0.9729 - val_loss: 0.6682 - val_accuracy: 0.8367 - lr: 3.1623e-05
Epoch 17/30
707/708 [============================>.] - ETA: 0s - loss: 0.0736 - accuracy: 0.9763
Epoch 17: val_loss did not improve from 0.62316

Epoch 17: ReduceLROnPlateau reducing learning rate to 9.999999259090306e-06.
708/708 [==============================] - 30s 42ms/step - loss: 0.0736 - accuracy: 0.9763 - val_loss: 0.6944 - val_accuracy: 0.8331 - lr: 3.1623e-05
Epoch 18/30
707/708 [============================>.] - ETA: 0s - loss: 0.0600 - accuracy: 0.9812
Epoch 18: val_loss did not improve from 0.62316
708/708 [==============================] - 30s 42ms/step - loss: 0.0599 - accuracy: 0.9812 - val_loss: 0.6876 - val_accuracy: 0.8327 - lr: 1.0000e-05
Epoch 19/30
707/708 [============================>.] - ETA: 0s - loss: 0.0558 - accuracy: 0.9827
Epoch 19: val_loss did not improve from 0.62316
708/708 [==============================] - 30s 42ms/step - loss: 0.0557 - accuracy: 0.9827 - val_loss: 0.6867 - val_accuracy: 0.8327 - lr: 1.0000e-05
Epoch 20/30
707/708 [============================>.] - ETA: 0s - loss: 0.0498 - accuracy: 0.9844
Epoch 20: val_loss did not improve from 0.62316

Epoch 20: ReduceLROnPlateau reducing learning rate to 3.162277292675049e-06.
708/708 [==============================] - 30s 42ms/step - loss: 0.0497 - accuracy: 0.9844 - val_loss: 0.6903 - val_accuracy: 0.8381 - lr: 1.0000e-05
Epoch 21/30
707/708 [============================>.] - ETA: 0s - loss: 0.0475 - accuracy: 0.9851
Epoch 21: val_loss did not improve from 0.62316
708/708 [==============================] - 30s 42ms/step - loss: 0.0474 - accuracy: 0.9851 - val_loss: 0.6940 - val_accuracy: 0.8375 - lr: 3.1623e-06
Epoch 22/30
707/708 [============================>.] - ETA: 0s - loss: 0.0439 - accuracy: 0.9863
Epoch 22: val_loss did not improve from 0.62316
708/708 [==============================] - 30s 42ms/step - loss: 0.0439 - accuracy: 0.9863 - val_loss: 0.6932 - val_accuracy: 0.8383 - lr: 3.1623e-06
Epoch 23/30
707/708 [============================>.] - ETA: 0s - loss: 0.0422 - accuracy: 0.9863
Epoch 23: val_loss did not improve from 0.62316

Epoch 23: ReduceLROnPlateau reducing learning rate to 9.999999115286567e-07.
708/708 [==============================] - 30s 42ms/step - loss: 0.0422 - accuracy: 0.9863 - val_loss: 0.6935 - val_accuracy: 0.8395 - lr: 3.1623e-06
Epoch 24/30
707/708 [============================>.] - ETA: 0s - loss: 0.0442 - accuracy: 0.9855
Epoch 24: val_loss did not improve from 0.62316
708/708 [==============================] - 30s 42ms/step - loss: 0.0442 - accuracy: 0.9855 - val_loss: 0.6983 - val_accuracy: 0.8387 - lr: 1.0000e-06
Epoch 25/30
707/708 [============================>.] - ETA: 0s - loss: 0.0418 - accuracy: 0.9872
Epoch 25: val_loss did not improve from 0.62316
708/708 [==============================] - 30s 42ms/step - loss: 0.0418 - accuracy: 0.9872 - val_loss: 0.6994 - val_accuracy: 0.8381 - lr: 1.0000e-06
Epoch 26/30
707/708 [============================>.] - ETA: 0s - loss: 0.0416 - accuracy: 0.9869
Epoch 26: val_loss did not improve from 0.62316

Epoch 26: ReduceLROnPlateau reducing learning rate to 3.1622772926750485e-07.
708/708 [==============================] - 30s 42ms/step - loss: 0.0416 - accuracy: 0.9869 - val_loss: 0.6983 - val_accuracy: 0.8383 - lr: 1.0000e-06
Epoch 27/30
707/708 [============================>.] - ETA: 0s - loss: 0.0427 - accuracy: 0.9866
Epoch 27: val_loss did not improve from 0.62316
708/708 [==============================] - 30s 42ms/step - loss: 0.0427 - accuracy: 0.9866 - val_loss: 0.6984 - val_accuracy: 0.8395 - lr: 3.1623e-07
Epoch 28/30
707/708 [============================>.] - ETA: 0s - loss: 0.0408 - accuracy: 0.9874
Epoch 28: val_loss did not improve from 0.62316
708/708 [==============================] - 30s 42ms/step - loss: 0.0408 - accuracy: 0.9874 - val_loss: 0.6990 - val_accuracy: 0.8397 - lr: 3.1623e-07
Epoch 29/30
707/708 [============================>.] - ETA: 0s - loss: 0.0393 - accuracy: 0.9878
Epoch 29: val_loss did not improve from 0.62316

Epoch 29: ReduceLROnPlateau reducing learning rate to 9.99999875577722e-08.
708/708 [==============================] - 30s 42ms/step - loss: 0.0393 - accuracy: 0.9878 - val_loss: 0.6973 - val_accuracy: 0.8395 - lr: 3.1623e-07
Epoch 30/30
707/708 [============================>.] - ETA: 0s - loss: 0.0400 - accuracy: 0.9877
Epoch 30: val_loss did not improve from 0.62316
708/708 [==============================] - 30s 42ms/step - loss: 0.0400 - accuracy: 0.9877 - val_loss: 0.6978 - val_accuracy: 0.8391 - lr: 1.0000e-07
394/394 [==============================] - 4s 10ms/step - loss: 0.7331 - accuracy: 0.8400
Out[ ]:
[0.733145534992218, 0.839968204498291]
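A note on the learning-rate trace in this log: each ReduceLROnPlateau step multiplies the rate by √0.1 ≈ 0.31623 (1.0000e-04 → 3.1623e-05 → 1.0000e-05 → ...), so the train_model helper defined earlier appears to use factor=np.sqrt(0.1). A minimal sketch reproducing the decay under that assumption:

import numpy as np

# Reproduce the LR steps printed in the log above, assuming factor=np.sqrt(0.1)
lr = 1e-4
for _ in range(4):
    lr *= np.sqrt(0.1)
    print(f"{lr:.4e}")  # 3.1623e-05, 1.0000e-05, 3.1623e-06, 1.0000e-06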
Combined Model 2 Results
In [ ]:
plot_model_history(history6, name="Combined Model 2")
Min validation loss: 0.6231619715690613 
Max validation loss: 2.020892858505249 
Min validation acc: 0.2959872782230377 
Max validation acc: 0.839690089225769
Model 2 vs Combined Model 2
In [ ]:
compare_model_history(history2, history6, name="Model 2 vs Combined Model 2", key1="Model 2", key2="Combined Model 2")
Model 2 validation loss: 0.5889896154403687 
Combined Model 2 validation loss: 0.6231619715690613

Model 2 validation accuracy: 0.8458482027053833 
Combined Model 2 validation accuracy: 0.839690089225769

Adamax Optimiser with Learning Rate of 0.0005
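
For reference (an aside, not in the original notebook), Adamax is the infinity-norm variant of Adam from Kingma & Ba (2015). With gradient $g_t$ and step size $\alpha$, its update is:

$$m_t = \beta_1 m_{t-1} + (1-\beta_1)\,g_t, \qquad u_t = \max(\beta_2 u_{t-1}, |g_t|), \qquad \theta_t = \theta_{t-1} - \frac{\alpha}{1-\beta_1^{t}} \cdot \frac{m_t}{u_t}$$

Here $\alpha = 0.0005$, and the Keras defaults $\beta_1 = 0.9$ and $\beta_2 = 0.999$ apply.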

In [ ]:
def combined_model_3():
  model = models.Sequential()

  # 1st convolutional layer
  model.add(layers.Conv2D(64, kernel_size=3, strides=1, padding='same', activation='relu', input_shape=(x_train.shape[1:])))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.25))

  # 2nd convolutional layer
  model.add(layers.Conv2D(128, kernel_size=5, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.25))

  # 3rd convolutional layer
  model.add(layers.Conv2D(512, kernel_size=3, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.25))

  # 4th convolutional layer
  model.add(layers.Conv2D(512, kernel_size=3, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.MaxPooling2D((2, 2)))
  model.add(layers.Dropout(0.25))
  model.add(layers.Flatten())

  # 1st fully connected dense layer
  model.add(layers.Dense(256, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.Dropout(0.25))

  # 2nd fully connected dense layer
  model.add(layers.Dense(512, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.Dropout(0.25))

  # final dense layer with number of classes
  model.add(layers.Dense(7, activation='softmax'))

  # compile the model
  model.compile(optimizer=optimizers.Adamax(0.0005), loss='categorical_crossentropy', metrics=['accuracy'])
  return model
In [ ]:
model = combined_model_3()
plot_model(model, to_file="model.png", show_shapes=True, show_layer_names=True)
Out[ ]:
[model architecture diagram rendered by plot_model and saved to model.png]
In [ ]:
history7 = train_model(model, 30, 64)
model.evaluate(x_test, y_test)
Epoch 1/30
707/708 [============================>.] - ETA: 0s - loss: 2.1592 - accuracy: 0.2287
Epoch 1: val_loss improved from inf to 2.03815, saving model to best-model.h5
708/708 [==============================] - 20s 27ms/step - loss: 2.1588 - accuracy: 0.2288 - val_loss: 2.0381 - val_accuracy: 0.2515 - lr: 5.0000e-04
Epoch 2/30
706/708 [============================>.] - ETA: 0s - loss: 1.7901 - accuracy: 0.3404
Epoch 2: val_loss improved from 2.03815 to 1.59138, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 1.7895 - accuracy: 0.3405 - val_loss: 1.5914 - val_accuracy: 0.4146 - lr: 5.0000e-04
Epoch 3/30
707/708 [============================>.] - ETA: 0s - loss: 1.5113 - accuracy: 0.4379
Epoch 3: val_loss improved from 1.59138 to 1.41898, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 1.5110 - accuracy: 0.4380 - val_loss: 1.4190 - val_accuracy: 0.4787 - lr: 5.0000e-04
Epoch 4/30
707/708 [============================>.] - ETA: 0s - loss: 1.3195 - accuracy: 0.5036
Epoch 4: val_loss improved from 1.41898 to 1.12246, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 1.3194 - accuracy: 0.5036 - val_loss: 1.1225 - val_accuracy: 0.5725 - lr: 5.0000e-04
Epoch 5/30
706/708 [============================>.] - ETA: 0s - loss: 1.1660 - accuracy: 0.5620
Epoch 5: val_loss improved from 1.12246 to 1.02669, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 1.1657 - accuracy: 0.5621 - val_loss: 1.0267 - val_accuracy: 0.6063 - lr: 5.0000e-04
Epoch 6/30
708/708 [==============================] - ETA: 0s - loss: 1.0565 - accuracy: 0.6026
Epoch 6: val_loss improved from 1.02669 to 0.94135, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 1.0565 - accuracy: 0.6026 - val_loss: 0.9414 - val_accuracy: 0.6458 - lr: 5.0000e-04
Epoch 7/30
706/708 [============================>.] - ETA: 0s - loss: 0.9649 - accuracy: 0.6388
Epoch 7: val_loss improved from 0.94135 to 0.88472, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.9649 - accuracy: 0.6388 - val_loss: 0.8847 - val_accuracy: 0.6700 - lr: 5.0000e-04
Epoch 8/30
707/708 [============================>.] - ETA: 0s - loss: 0.8809 - accuracy: 0.6709
Epoch 8: val_loss improved from 0.88472 to 0.85902, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.8810 - accuracy: 0.6708 - val_loss: 0.8590 - val_accuracy: 0.6782 - lr: 5.0000e-04
Epoch 9/30
707/708 [============================>.] - ETA: 0s - loss: 0.8060 - accuracy: 0.7020
Epoch 9: val_loss improved from 0.85902 to 0.83001, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.8057 - accuracy: 0.7021 - val_loss: 0.8300 - val_accuracy: 0.6981 - lr: 5.0000e-04
Epoch 10/30
706/708 [============================>.] - ETA: 0s - loss: 0.7378 - accuracy: 0.7281
Epoch 10: val_loss improved from 0.83001 to 0.74577, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.7380 - accuracy: 0.7280 - val_loss: 0.7458 - val_accuracy: 0.7263 - lr: 5.0000e-04
Epoch 11/30
707/708 [============================>.] - ETA: 0s - loss: 0.6707 - accuracy: 0.7559
Epoch 11: val_loss did not improve from 0.74577
708/708 [==============================] - 19s 27ms/step - loss: 0.6711 - accuracy: 0.7557 - val_loss: 0.8433 - val_accuracy: 0.6881 - lr: 5.0000e-04
Epoch 12/30
708/708 [==============================] - ETA: 0s - loss: 0.5997 - accuracy: 0.7840
Epoch 12: val_loss improved from 0.74577 to 0.65467, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.5997 - accuracy: 0.7840 - val_loss: 0.6547 - val_accuracy: 0.7676 - lr: 5.0000e-04
Epoch 13/30
707/708 [============================>.] - ETA: 0s - loss: 0.5458 - accuracy: 0.8034
Epoch 13: val_loss improved from 0.65467 to 0.62948, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.5458 - accuracy: 0.8033 - val_loss: 0.6295 - val_accuracy: 0.7892 - lr: 5.0000e-04
Epoch 14/30
707/708 [============================>.] - ETA: 0s - loss: 0.4919 - accuracy: 0.8235
Epoch 14: val_loss improved from 0.62948 to 0.60042, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.4919 - accuracy: 0.8235 - val_loss: 0.6004 - val_accuracy: 0.8004 - lr: 5.0000e-04
Epoch 15/30
707/708 [============================>.] - ETA: 0s - loss: 0.4494 - accuracy: 0.8392
Epoch 15: val_loss did not improve from 0.60042
708/708 [==============================] - 19s 27ms/step - loss: 0.4495 - accuracy: 0.8392 - val_loss: 0.6400 - val_accuracy: 0.7886 - lr: 5.0000e-04
Epoch 16/30
707/708 [============================>.] - ETA: 0s - loss: 0.4043 - accuracy: 0.8567
Epoch 16: val_loss improved from 0.60042 to 0.59203, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.4045 - accuracy: 0.8567 - val_loss: 0.5920 - val_accuracy: 0.8147 - lr: 5.0000e-04
Epoch 17/30
707/708 [============================>.] - ETA: 0s - loss: 0.3703 - accuracy: 0.8673
Epoch 17: val_loss improved from 0.59203 to 0.58432, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.3702 - accuracy: 0.8673 - val_loss: 0.5843 - val_accuracy: 0.8206 - lr: 5.0000e-04
Epoch 18/30
708/708 [==============================] - ETA: 0s - loss: 0.3351 - accuracy: 0.8824
Epoch 18: val_loss did not improve from 0.58432
708/708 [==============================] - 19s 27ms/step - loss: 0.3351 - accuracy: 0.8824 - val_loss: 0.6047 - val_accuracy: 0.8230 - lr: 5.0000e-04
Epoch 19/30
707/708 [============================>.] - ETA: 0s - loss: 0.3089 - accuracy: 0.8913
Epoch 19: val_loss did not improve from 0.58432
708/708 [==============================] - 19s 27ms/step - loss: 0.3089 - accuracy: 0.8914 - val_loss: 0.6208 - val_accuracy: 0.8218 - lr: 5.0000e-04
Epoch 20/30
707/708 [============================>.] - ETA: 0s - loss: 0.2829 - accuracy: 0.9015
Epoch 20: val_loss did not improve from 0.58432

Epoch 20: ReduceLROnPlateau reducing learning rate to 0.00015811389051842542.
708/708 [==============================] - 19s 27ms/step - loss: 0.2829 - accuracy: 0.9015 - val_loss: 0.6188 - val_accuracy: 0.8250 - lr: 5.0000e-04
Epoch 21/30
706/708 [============================>.] - ETA: 0s - loss: 0.2224 - accuracy: 0.9219
Epoch 21: val_loss improved from 0.58432 to 0.58225, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.2222 - accuracy: 0.9220 - val_loss: 0.5823 - val_accuracy: 0.8393 - lr: 1.5811e-04
Epoch 22/30
707/708 [============================>.] - ETA: 0s - loss: 0.1973 - accuracy: 0.9305
Epoch 22: val_loss did not improve from 0.58225
708/708 [==============================] - 19s 27ms/step - loss: 0.1973 - accuracy: 0.9304 - val_loss: 0.5971 - val_accuracy: 0.8413 - lr: 1.5811e-04
Epoch 23/30
707/708 [============================>.] - ETA: 0s - loss: 0.1748 - accuracy: 0.9385
Epoch 23: val_loss did not improve from 0.58225
708/708 [==============================] - 19s 27ms/step - loss: 0.1747 - accuracy: 0.9386 - val_loss: 0.6127 - val_accuracy: 0.8405 - lr: 1.5811e-04
Epoch 24/30
707/708 [============================>.] - ETA: 0s - loss: 0.1646 - accuracy: 0.9440
Epoch 24: val_loss did not improve from 0.58225

Epoch 24: ReduceLROnPlateau reducing learning rate to 5.0000003198030994e-05.
708/708 [==============================] - 19s 27ms/step - loss: 0.1645 - accuracy: 0.9440 - val_loss: 0.6276 - val_accuracy: 0.8433 - lr: 1.5811e-04
Epoch 25/30
707/708 [============================>.] - ETA: 0s - loss: 0.1537 - accuracy: 0.9456
Epoch 25: val_loss did not improve from 0.58225
708/708 [==============================] - 19s 27ms/step - loss: 0.1537 - accuracy: 0.9456 - val_loss: 0.6209 - val_accuracy: 0.8437 - lr: 5.0000e-05
Epoch 26/30
706/708 [============================>.] - ETA: 0s - loss: 0.1469 - accuracy: 0.9497
Epoch 26: val_loss did not improve from 0.58225
708/708 [==============================] - 19s 27ms/step - loss: 0.1472 - accuracy: 0.9495 - val_loss: 0.6207 - val_accuracy: 0.8439 - lr: 5.0000e-05
Epoch 27/30
707/708 [============================>.] - ETA: 0s - loss: 0.1393 - accuracy: 0.9522
Epoch 27: val_loss did not improve from 0.58225

Epoch 27: ReduceLROnPlateau reducing learning rate to 1.5811389051842542e-05.
708/708 [==============================] - 19s 27ms/step - loss: 0.1393 - accuracy: 0.9522 - val_loss: 0.6223 - val_accuracy: 0.8421 - lr: 5.0000e-05
Epoch 28/30
708/708 [==============================] - ETA: 0s - loss: 0.1353 - accuracy: 0.9539
Epoch 28: val_loss did not improve from 0.58225
708/708 [==============================] - 19s 27ms/step - loss: 0.1353 - accuracy: 0.9539 - val_loss: 0.6233 - val_accuracy: 0.8431 - lr: 1.5811e-05
Epoch 29/30
707/708 [============================>.] - ETA: 0s - loss: 0.1344 - accuracy: 0.9540
Epoch 29: val_loss did not improve from 0.58225
708/708 [==============================] - 19s 27ms/step - loss: 0.1344 - accuracy: 0.9540 - val_loss: 0.6238 - val_accuracy: 0.8433 - lr: 1.5811e-05
Epoch 30/30
707/708 [============================>.] - ETA: 0s - loss: 0.1320 - accuracy: 0.9541
Epoch 30: val_loss did not improve from 0.58225

Epoch 30: ReduceLROnPlateau reducing learning rate to 5.000000204760109e-06.
708/708 [==============================] - 19s 27ms/step - loss: 0.1319 - accuracy: 0.9541 - val_loss: 0.6269 - val_accuracy: 0.8439 - lr: 1.5811e-05
394/394 [==============================] - 2s 6ms/step - loss: 0.6537 - accuracy: 0.8445
Out[ ]:
[0.6536846160888672, 0.8444974422454834]
Combined Model 3 Results
In [ ]:
plot_model_history(history7, name="Combined Model 3")
Min validation loss: 0.5822529792785645 
Max validation loss: 2.038146734237671 
Min validation acc: 0.25148987770080566 
Max validation acc: 0.8438617587089539
Model 2 vs Combined Model 3
In [ ]:
compare_model_history(history2, history7, name="Model 2 vs Combined Model 3", key1="Model 2", key2="Combined Model 3")
Model 2 validation loss: 0.5889896154403687 
Combined Model 3 validation loss: 0.5822529792785645

Model 2 validation accuracy: 0.8458482027053833 
Combined Model 3 validation accuracy: 0.8438617587089539

Final Combined Model

This variant keeps the Combined Model 3 architecture but swaps every MaxPooling2D layer for AveragePooling2D and raises each dropout rate from 0.25 to 0.3; a short pooling comparison follows the definition below.

In [ ]:
def final_combined_model():
  model = models.Sequential()

  # 1st convolutional layer
  model.add(layers.Conv2D(64, kernel_size=3, strides=1, padding='same', activation='relu', input_shape=(x_train.shape[1:])))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  model.add(layers.Dropout(0.3))

  # 2nd convolutional layer
  model.add(layers.Conv2D(128, kernel_size=5, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  model.add(layers.Dropout(0.3))

  # 3rd convolutional layer
  model.add(layers.Conv2D(512, kernel_size=3, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  model.add(layers.Dropout(0.3))

  # 4th convolutional layer
  model.add(layers.Conv2D(512, kernel_size=3, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  model.add(layers.Dropout(0.3))
  model.add(layers.Flatten())

  # 1st fully connected dense layer
  model.add(layers.Dense(256, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.Dropout(0.3))

  # 2nd fully connected dense layer
  model.add(layers.Dense(512, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.Dropout(0.3))

  # final dense layer with number of classes
  model.add(layers.Dense(7, activation='softmax'))

  # compile the model
  model.compile(optimizer=optimizers.Adamax(0.0005), loss='categorical_crossentropy', metrics=['accuracy'])
  return model
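To illustrate the pooling swap (an aside, not from the original notebook): max pooling keeps only the strongest activation in each window, while average pooling blends the whole window, which tends to smooth the feature maps.

import numpy as np
import tensorflow as tf

# Toy 4x4 feature map to contrast the two pooling choices
x = tf.constant(np.arange(16, dtype='float32').reshape(1, 4, 4, 1))

print(tf.keras.layers.MaxPooling2D((2, 2))(x)[0, :, :, 0].numpy())
# [[ 5.  7.]
#  [13. 15.]]
print(tf.keras.layers.AveragePooling2D((2, 2))(x)[0, :, :, 0].numpy())
# [[ 2.5  4.5]
#  [10.5 12.5]]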
In [ ]:
model = final_combined_model()
plot_model(model, to_file="model.png", show_shapes=True, show_layer_names=True)
Out[ ]:
[model architecture diagram rendered by plot_model and saved to model.png]
In [ ]:
history8 = train_model(model, 30, 64)
model.evaluate(x_test, y_test)
Epoch 1/30
707/708 [============================>.] - ETA: 0s - loss: 2.2181 - accuracy: 0.2147
Epoch 1: val_loss improved from inf to 1.75674, saving model to best-model.h5
708/708 [==============================] - 20s 26ms/step - loss: 2.2177 - accuracy: 0.2149 - val_loss: 1.7567 - val_accuracy: 0.3117 - lr: 5.0000e-04
Epoch 2/30
707/708 [============================>.] - ETA: 0s - loss: 1.8615 - accuracy: 0.3190
Epoch 2: val_loss improved from 1.75674 to 1.53602, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 1.8609 - accuracy: 0.3193 - val_loss: 1.5360 - val_accuracy: 0.4213 - lr: 5.0000e-04
Epoch 3/30
708/708 [==============================] - ETA: 0s - loss: 1.5475 - accuracy: 0.4253
Epoch 3: val_loss improved from 1.53602 to 1.30146, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 1.5475 - accuracy: 0.4253 - val_loss: 1.3015 - val_accuracy: 0.5095 - lr: 5.0000e-04
Epoch 4/30
707/708 [============================>.] - ETA: 0s - loss: 1.3409 - accuracy: 0.4992
Epoch 4: val_loss improved from 1.30146 to 1.14279, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 1.3407 - accuracy: 0.4993 - val_loss: 1.1428 - val_accuracy: 0.5656 - lr: 5.0000e-04
Epoch 5/30
707/708 [============================>.] - ETA: 0s - loss: 1.1905 - accuracy: 0.5512
Epoch 5: val_loss improved from 1.14279 to 1.00718, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 1.1906 - accuracy: 0.5512 - val_loss: 1.0072 - val_accuracy: 0.6206 - lr: 5.0000e-04
Epoch 6/30
707/708 [============================>.] - ETA: 0s - loss: 1.0822 - accuracy: 0.5924
Epoch 6: val_loss improved from 1.00718 to 0.97151, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 1.0822 - accuracy: 0.5924 - val_loss: 0.9715 - val_accuracy: 0.6313 - lr: 5.0000e-04
Epoch 7/30
706/708 [============================>.] - ETA: 0s - loss: 0.9955 - accuracy: 0.6273
Epoch 7: val_loss improved from 0.97151 to 0.87965, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 0.9955 - accuracy: 0.6273 - val_loss: 0.8796 - val_accuracy: 0.6571 - lr: 5.0000e-04
Epoch 8/30
708/708 [==============================] - ETA: 0s - loss: 0.9230 - accuracy: 0.6549
Epoch 8: val_loss did not improve from 0.87965
708/708 [==============================] - 18s 26ms/step - loss: 0.9230 - accuracy: 0.6549 - val_loss: 0.8833 - val_accuracy: 0.6667 - lr: 5.0000e-04
Epoch 9/30
707/708 [============================>.] - ETA: 0s - loss: 0.8538 - accuracy: 0.6818
Epoch 9: val_loss improved from 0.87965 to 0.82398, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 0.8540 - accuracy: 0.6816 - val_loss: 0.8240 - val_accuracy: 0.6895 - lr: 5.0000e-04
Epoch 10/30
706/708 [============================>.] - ETA: 0s - loss: 0.7934 - accuracy: 0.7051
Epoch 10: val_loss improved from 0.82398 to 0.77713, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 0.7932 - accuracy: 0.7052 - val_loss: 0.7771 - val_accuracy: 0.7090 - lr: 5.0000e-04
Epoch 11/30
707/708 [============================>.] - ETA: 0s - loss: 0.7348 - accuracy: 0.7317
Epoch 11: val_loss did not improve from 0.77713
708/708 [==============================] - 18s 26ms/step - loss: 0.7347 - accuracy: 0.7317 - val_loss: 0.7794 - val_accuracy: 0.7114 - lr: 5.0000e-04
Epoch 12/30
706/708 [============================>.] - ETA: 0s - loss: 0.6830 - accuracy: 0.7482
Epoch 12: val_loss improved from 0.77713 to 0.69279, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 0.6826 - accuracy: 0.7483 - val_loss: 0.6928 - val_accuracy: 0.7471 - lr: 5.0000e-04
Epoch 13/30
708/708 [==============================] - ETA: 0s - loss: 0.6386 - accuracy: 0.7666
Epoch 13: val_loss did not improve from 0.69279
708/708 [==============================] - 18s 26ms/step - loss: 0.6386 - accuracy: 0.7666 - val_loss: 0.6931 - val_accuracy: 0.7485 - lr: 5.0000e-04
Epoch 14/30
706/708 [============================>.] - ETA: 0s - loss: 0.5910 - accuracy: 0.7849
Epoch 14: val_loss improved from 0.69279 to 0.63149, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 0.5912 - accuracy: 0.7849 - val_loss: 0.6315 - val_accuracy: 0.7795 - lr: 5.0000e-04
Epoch 15/30
706/708 [============================>.] - ETA: 0s - loss: 0.5432 - accuracy: 0.8024
Epoch 15: val_loss did not improve from 0.63149
708/708 [==============================] - 18s 26ms/step - loss: 0.5435 - accuracy: 0.8024 - val_loss: 0.6373 - val_accuracy: 0.7797 - lr: 5.0000e-04
Epoch 16/30
706/708 [============================>.] - ETA: 0s - loss: 0.5119 - accuracy: 0.8177
Epoch 16: val_loss improved from 0.63149 to 0.61047, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 0.5119 - accuracy: 0.8177 - val_loss: 0.6105 - val_accuracy: 0.7936 - lr: 5.0000e-04
Epoch 17/30
707/708 [============================>.] - ETA: 0s - loss: 0.4713 - accuracy: 0.8303
Epoch 17: val_loss improved from 0.61047 to 0.60110, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 0.4713 - accuracy: 0.8303 - val_loss: 0.6011 - val_accuracy: 0.8073 - lr: 5.0000e-04
Epoch 18/30
707/708 [============================>.] - ETA: 0s - loss: 0.4359 - accuracy: 0.8442
Epoch 18: val_loss did not improve from 0.60110
708/708 [==============================] - 18s 26ms/step - loss: 0.4363 - accuracy: 0.8441 - val_loss: 0.6074 - val_accuracy: 0.8021 - lr: 5.0000e-04
Epoch 19/30
708/708 [==============================] - ETA: 0s - loss: 0.4173 - accuracy: 0.8523
Epoch 19: val_loss did not improve from 0.60110
708/708 [==============================] - 18s 26ms/step - loss: 0.4173 - accuracy: 0.8523 - val_loss: 0.6035 - val_accuracy: 0.8089 - lr: 5.0000e-04
Epoch 20/30
706/708 [============================>.] - ETA: 0s - loss: 0.3870 - accuracy: 0.8610
Epoch 20: val_loss improved from 0.60110 to 0.58920, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 0.3869 - accuracy: 0.8611 - val_loss: 0.5892 - val_accuracy: 0.8202 - lr: 5.0000e-04
Epoch 21/30
708/708 [==============================] - ETA: 0s - loss: 0.3539 - accuracy: 0.8745
Epoch 21: val_loss improved from 0.58920 to 0.57808, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 0.3539 - accuracy: 0.8745 - val_loss: 0.5781 - val_accuracy: 0.8212 - lr: 5.0000e-04
Epoch 22/30
707/708 [============================>.] - ETA: 0s - loss: 0.3414 - accuracy: 0.8769
Epoch 22: val_loss improved from 0.57808 to 0.57342, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 0.3415 - accuracy: 0.8768 - val_loss: 0.5734 - val_accuracy: 0.8329 - lr: 5.0000e-04
Epoch 23/30
708/708 [==============================] - ETA: 0s - loss: 0.3201 - accuracy: 0.8852
Epoch 23: val_loss improved from 0.57342 to 0.56806, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 0.3201 - accuracy: 0.8852 - val_loss: 0.5681 - val_accuracy: 0.8313 - lr: 5.0000e-04
Epoch 24/30
708/708 [==============================] - ETA: 0s - loss: 0.3036 - accuracy: 0.8944
Epoch 24: val_loss did not improve from 0.56806
708/708 [==============================] - 18s 26ms/step - loss: 0.3036 - accuracy: 0.8944 - val_loss: 0.5714 - val_accuracy: 0.8317 - lr: 5.0000e-04
Epoch 25/30
706/708 [============================>.] - ETA: 0s - loss: 0.2839 - accuracy: 0.9003
Epoch 25: val_loss did not improve from 0.56806
708/708 [==============================] - 18s 26ms/step - loss: 0.2839 - accuracy: 0.9002 - val_loss: 0.5808 - val_accuracy: 0.8333 - lr: 5.0000e-04
Epoch 26/30
706/708 [============================>.] - ETA: 0s - loss: 0.2714 - accuracy: 0.9026
Epoch 26: val_loss did not improve from 0.56806

Epoch 26: ReduceLROnPlateau reducing learning rate to 0.00015811389051842542.
708/708 [==============================] - 18s 26ms/step - loss: 0.2713 - accuracy: 0.9025 - val_loss: 0.5834 - val_accuracy: 0.8353 - lr: 5.0000e-04
Epoch 27/30
706/708 [============================>.] - ETA: 0s - loss: 0.2267 - accuracy: 0.9189
Epoch 27: val_loss did not improve from 0.56806
708/708 [==============================] - 18s 26ms/step - loss: 0.2265 - accuracy: 0.9190 - val_loss: 0.5733 - val_accuracy: 0.8425 - lr: 1.5811e-04
Epoch 28/30
706/708 [============================>.] - ETA: 0s - loss: 0.2006 - accuracy: 0.9290
Epoch 28: val_loss did not improve from 0.56806
708/708 [==============================] - 18s 26ms/step - loss: 0.2006 - accuracy: 0.9289 - val_loss: 0.5730 - val_accuracy: 0.8423 - lr: 1.5811e-04
Epoch 29/30
707/708 [============================>.] - ETA: 0s - loss: 0.1969 - accuracy: 0.9299
Epoch 29: val_loss did not improve from 0.56806

Epoch 29: ReduceLROnPlateau reducing learning rate to 5.0000003198030994e-05.
708/708 [==============================] - 18s 26ms/step - loss: 0.1968 - accuracy: 0.9299 - val_loss: 0.5784 - val_accuracy: 0.8447 - lr: 1.5811e-04
Epoch 30/30
706/708 [============================>.] - ETA: 0s - loss: 0.1796 - accuracy: 0.9374
Epoch 30: val_loss did not improve from 0.56806
708/708 [==============================] - 18s 26ms/step - loss: 0.1797 - accuracy: 0.9374 - val_loss: 0.5856 - val_accuracy: 0.8453 - lr: 5.0000e-05
394/394 [==============================] - 2s 6ms/step - loss: 0.6120 - accuracy: 0.8457
Out[ ]:
[0.6120489835739136, 0.8456892967224121]
Final Combined Model Results
In [ ]:
plot_model_history(history8, name="Final Combined Model")
Min validation loss: 0.5680626630783081 
Max validation loss: 1.7567416429519653 
Min validation acc: 0.3116805851459503 
Max validation acc: 0.845252275466919
Model 2 vs Final Combined Model
In [ ]:
compare_model_history(history2, history8, name="Model 2 vs Final Combined Model", key1="Model 2", key2="Final Combined Model")
Model 2 validation loss: 0.5889896154403687 
Final Combined Model validation loss: 0.5680626630783081

Model 2 validation accuracy: 0.8458482027053833 
Final Combined Model validation accuracy: 0.845252275466919

Final Combined Model with Data Augmentation

In [53]:
train_datagen = ImageDataGenerator(
    rotation_range=10, # randomly rotating images by up to 10 degrees
    zoom_range=0.1, # randomly zooming in or out by up to 10%
    width_shift_range=0.1, # horizontally shifting the images by up to 10% of the width
    height_shift_range=0.1, # vertically shifting the images by up to 10% of the height
    horizontal_flip=True, # allows horizontal flipping of images
    vertical_flip=True, # allows vertical flipping of images
    fill_mode='nearest') # fills any empty area with the nearest pixel values

plot_augmentation_samples(train_datagen)
Data Augmentation Samples has been saved
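train_augmented_model is defined alongside the other helpers earlier in the notebook and is not repeated here. As a hedged sketch of the idea only: batches are drawn from datagen.flow so the augmentations are applied on the fly, with the same callback trio used elsewhere. The patience values below are assumptions, factor=0.2 is inferred from the learning-rate steps in the log that follows (5.0e-04 → 1.0e-04 → 2.0e-05 → 4.0e-06), and x_train, y_train, x_val and y_val are the splits prepared earlier.

from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

def train_augmented_model_sketch(model, datagen, epochs, batch_size):  # hypothetical name
    callbacks = [
        ModelCheckpoint('best-model.h5', monitor='val_loss', save_best_only=True, verbose=1),
        ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, verbose=1),  # factor inferred from the logs
        EarlyStopping(monitor='val_loss', patience=6, restore_best_weights=True, verbose=1),  # patience assumed
    ]
    # Augmented batches are generated on the fly; the validation data stays unaugmented
    return model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
                     epochs=epochs,
                     validation_data=(x_val, y_val),
                     callbacks=callbacks)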
In [ ]:
model = final_combined_model()

history9 = train_augmented_model(model, train_datagen, 80, 128)
model.evaluate(x_test, y_test)
Epoch 1/80
352/353 [============================>.] - ETA: 0s - loss: 2.2990 - accuracy: 0.1946
Epoch 1: val_loss improved from inf to 2.01496, saving model to best-model.h5
353/353 [==============================] - 18s 48ms/step - loss: 2.2985 - accuracy: 0.1949 - val_loss: 2.0150 - val_accuracy: 0.1561 - lr: 5.0000e-04
Epoch 2/80
352/353 [============================>.] - ETA: 0s - loss: 2.0626 - accuracy: 0.2353
Epoch 2: val_loss improved from 2.01496 to 1.73165, saving model to best-model.h5
353/353 [==============================] - 17s 47ms/step - loss: 2.0623 - accuracy: 0.2353 - val_loss: 1.7317 - val_accuracy: 0.3172 - lr: 5.0000e-04
Epoch 3/80
352/353 [============================>.] - ETA: 0s - loss: 1.9164 - accuracy: 0.2801
Epoch 3: val_loss improved from 1.73165 to 1.64016, saving model to best-model.h5
353/353 [==============================] - 17s 47ms/step - loss: 1.9161 - accuracy: 0.2802 - val_loss: 1.6402 - val_accuracy: 0.3659 - lr: 5.0000e-04
Epoch 4/80
352/353 [============================>.] - ETA: 0s - loss: 1.7677 - accuracy: 0.3395
Epoch 4: val_loss improved from 1.64016 to 1.49275, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 1.7675 - accuracy: 0.3396 - val_loss: 1.4928 - val_accuracy: 0.4327 - lr: 5.0000e-04
Epoch 5/80
352/353 [============================>.] - ETA: 0s - loss: 1.6103 - accuracy: 0.3955
Epoch 5: val_loss improved from 1.49275 to 1.36513, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 1.6103 - accuracy: 0.3955 - val_loss: 1.3651 - val_accuracy: 0.4831 - lr: 5.0000e-04
Epoch 6/80
352/353 [============================>.] - ETA: 0s - loss: 1.4845 - accuracy: 0.4447
Epoch 6: val_loss improved from 1.36513 to 1.33193, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 1.4841 - accuracy: 0.4449 - val_loss: 1.3319 - val_accuracy: 0.4940 - lr: 5.0000e-04
Epoch 7/80
353/353 [==============================] - ETA: 0s - loss: 1.3909 - accuracy: 0.4761
Epoch 7: val_loss improved from 1.33193 to 1.17264, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 1.3909 - accuracy: 0.4761 - val_loss: 1.1726 - val_accuracy: 0.5501 - lr: 5.0000e-04
Epoch 8/80
352/353 [============================>.] - ETA: 0s - loss: 1.3007 - accuracy: 0.5100
Epoch 8: val_loss improved from 1.17264 to 1.10067, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 1.3003 - accuracy: 0.5102 - val_loss: 1.1007 - val_accuracy: 0.5836 - lr: 5.0000e-04
Epoch 9/80
352/353 [============================>.] - ETA: 0s - loss: 1.2339 - accuracy: 0.5345
Epoch 9: val_loss improved from 1.10067 to 1.08392, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 1.2342 - accuracy: 0.5344 - val_loss: 1.0839 - val_accuracy: 0.5910 - lr: 5.0000e-04
Epoch 10/80
352/353 [============================>.] - ETA: 0s - loss: 1.1794 - accuracy: 0.5553
Epoch 10: val_loss improved from 1.08392 to 1.01805, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 1.1792 - accuracy: 0.5554 - val_loss: 1.0181 - val_accuracy: 0.6150 - lr: 5.0000e-04
Epoch 11/80
353/353 [==============================] - ETA: 0s - loss: 1.1296 - accuracy: 0.5733
Epoch 11: val_loss improved from 1.01805 to 1.01225, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 1.1296 - accuracy: 0.5733 - val_loss: 1.0123 - val_accuracy: 0.6182 - lr: 5.0000e-04
Epoch 12/80
352/353 [============================>.] - ETA: 0s - loss: 1.0840 - accuracy: 0.5930
Epoch 12: val_loss improved from 1.01225 to 0.92594, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 1.0841 - accuracy: 0.5930 - val_loss: 0.9259 - val_accuracy: 0.6490 - lr: 5.0000e-04
Epoch 13/80
352/353 [============================>.] - ETA: 0s - loss: 1.0420 - accuracy: 0.6080
Epoch 13: val_loss improved from 0.92594 to 0.90837, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 1.0422 - accuracy: 0.6079 - val_loss: 0.9084 - val_accuracy: 0.6498 - lr: 5.0000e-04
Epoch 14/80
352/353 [============================>.] - ETA: 0s - loss: 1.0128 - accuracy: 0.6202
Epoch 14: val_loss improved from 0.90837 to 0.88112, saving model to best-model.h5
353/353 [==============================] - 17s 47ms/step - loss: 1.0125 - accuracy: 0.6203 - val_loss: 0.8811 - val_accuracy: 0.6659 - lr: 5.0000e-04
Epoch 15/80
352/353 [============================>.] - ETA: 0s - loss: 0.9844 - accuracy: 0.6321
Epoch 15: val_loss did not improve from 0.88112
353/353 [==============================] - 17s 47ms/step - loss: 0.9841 - accuracy: 0.6320 - val_loss: 0.9076 - val_accuracy: 0.6476 - lr: 5.0000e-04
Epoch 16/80
353/353 [==============================] - ETA: 0s - loss: 0.9539 - accuracy: 0.6432
Epoch 16: val_loss improved from 0.88112 to 0.87470, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.9539 - accuracy: 0.6432 - val_loss: 0.8747 - val_accuracy: 0.6692 - lr: 5.0000e-04
Epoch 17/80
353/353 [==============================] - ETA: 0s - loss: 0.9274 - accuracy: 0.6532
Epoch 17: val_loss improved from 0.87470 to 0.80462, saving model to best-model.h5
353/353 [==============================] - 17s 47ms/step - loss: 0.9274 - accuracy: 0.6532 - val_loss: 0.8046 - val_accuracy: 0.6937 - lr: 5.0000e-04
Epoch 18/80
353/353 [==============================] - ETA: 0s - loss: 0.9037 - accuracy: 0.6609
Epoch 18: val_loss did not improve from 0.80462
353/353 [==============================] - 17s 47ms/step - loss: 0.9037 - accuracy: 0.6609 - val_loss: 0.8267 - val_accuracy: 0.6832 - lr: 5.0000e-04
Epoch 19/80
353/353 [==============================] - ETA: 0s - loss: 0.8782 - accuracy: 0.6686
Epoch 19: val_loss did not improve from 0.80462
353/353 [==============================] - 17s 49ms/step - loss: 0.8782 - accuracy: 0.6686 - val_loss: 0.8285 - val_accuracy: 0.6885 - lr: 5.0000e-04
Epoch 20/80
353/353 [==============================] - ETA: 0s - loss: 0.8548 - accuracy: 0.6797
Epoch 20: val_loss improved from 0.80462 to 0.79782, saving model to best-model.h5
353/353 [==============================] - 18s 52ms/step - loss: 0.8548 - accuracy: 0.6797 - val_loss: 0.7978 - val_accuracy: 0.7034 - lr: 5.0000e-04
Epoch 21/80
353/353 [==============================] - ETA: 0s - loss: 0.8389 - accuracy: 0.6863
Epoch 21: val_loss improved from 0.79782 to 0.79200, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.8389 - accuracy: 0.6863 - val_loss: 0.7920 - val_accuracy: 0.7028 - lr: 5.0000e-04
Epoch 22/80
353/353 [==============================] - ETA: 0s - loss: 0.8217 - accuracy: 0.6922
Epoch 22: val_loss improved from 0.79200 to 0.74942, saving model to best-model.h5
353/353 [==============================] - 17s 47ms/step - loss: 0.8217 - accuracy: 0.6922 - val_loss: 0.7494 - val_accuracy: 0.7149 - lr: 5.0000e-04
Epoch 23/80
352/353 [============================>.] - ETA: 0s - loss: 0.8048 - accuracy: 0.6990
Epoch 23: val_loss improved from 0.74942 to 0.74191, saving model to best-model.h5
353/353 [==============================] - 18s 52ms/step - loss: 0.8046 - accuracy: 0.6991 - val_loss: 0.7419 - val_accuracy: 0.7189 - lr: 5.0000e-04
Epoch 24/80
353/353 [==============================] - ETA: 0s - loss: 0.7847 - accuracy: 0.7069
Epoch 24: val_loss improved from 0.74191 to 0.71948, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.7847 - accuracy: 0.7069 - val_loss: 0.7195 - val_accuracy: 0.7332 - lr: 5.0000e-04
Epoch 25/80
352/353 [============================>.] - ETA: 0s - loss: 0.7719 - accuracy: 0.7149
Epoch 25: val_loss did not improve from 0.71948
353/353 [==============================] - 17s 47ms/step - loss: 0.7722 - accuracy: 0.7148 - val_loss: 0.7429 - val_accuracy: 0.7267 - lr: 5.0000e-04
Epoch 26/80
352/353 [============================>.] - ETA: 0s - loss: 0.7532 - accuracy: 0.7193
Epoch 26: val_loss improved from 0.71948 to 0.71428, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.7534 - accuracy: 0.7194 - val_loss: 0.7143 - val_accuracy: 0.7346 - lr: 5.0000e-04
Epoch 27/80
352/353 [============================>.] - ETA: 0s - loss: 0.7440 - accuracy: 0.7222
Epoch 27: val_loss improved from 0.71428 to 0.70860, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.7440 - accuracy: 0.7222 - val_loss: 0.7086 - val_accuracy: 0.7414 - lr: 5.0000e-04
Epoch 28/80
353/353 [==============================] - ETA: 0s - loss: 0.7244 - accuracy: 0.7311
Epoch 28: val_loss improved from 0.70860 to 0.69429, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.7244 - accuracy: 0.7311 - val_loss: 0.6943 - val_accuracy: 0.7426 - lr: 5.0000e-04
Epoch 29/80
352/353 [============================>.] - ETA: 0s - loss: 0.7131 - accuracy: 0.7390
Epoch 29: val_loss did not improve from 0.69429
353/353 [==============================] - 17s 47ms/step - loss: 0.7130 - accuracy: 0.7391 - val_loss: 0.6948 - val_accuracy: 0.7455 - lr: 5.0000e-04
Epoch 30/80
352/353 [============================>.] - ETA: 0s - loss: 0.7028 - accuracy: 0.7398
Epoch 30: val_loss improved from 0.69429 to 0.67519, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.7026 - accuracy: 0.7398 - val_loss: 0.6752 - val_accuracy: 0.7501 - lr: 5.0000e-04
Epoch 31/80
353/353 [==============================] - ETA: 0s - loss: 0.6849 - accuracy: 0.7454
Epoch 31: val_loss improved from 0.67519 to 0.66594, saving model to best-model.h5
353/353 [==============================] - 17s 47ms/step - loss: 0.6849 - accuracy: 0.7454 - val_loss: 0.6659 - val_accuracy: 0.7604 - lr: 5.0000e-04
Epoch 32/80
353/353 [==============================] - ETA: 0s - loss: 0.6769 - accuracy: 0.7512
Epoch 32: val_loss did not improve from 0.66594
353/353 [==============================] - 17s 47ms/step - loss: 0.6769 - accuracy: 0.7512 - val_loss: 0.6935 - val_accuracy: 0.7465 - lr: 5.0000e-04
Epoch 33/80
353/353 [==============================] - ETA: 0s - loss: 0.6687 - accuracy: 0.7521
Epoch 33: val_loss did not improve from 0.66594
353/353 [==============================] - 17s 47ms/step - loss: 0.6687 - accuracy: 0.7521 - val_loss: 0.7284 - val_accuracy: 0.7330 - lr: 5.0000e-04
Epoch 34/80
352/353 [============================>.] - ETA: 0s - loss: 0.6512 - accuracy: 0.7608
Epoch 34: val_loss improved from 0.66594 to 0.63767, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.6517 - accuracy: 0.7606 - val_loss: 0.6377 - val_accuracy: 0.7737 - lr: 5.0000e-04
Epoch 35/80
352/353 [============================>.] - ETA: 0s - loss: 0.6423 - accuracy: 0.7639
Epoch 35: val_loss did not improve from 0.63767
353/353 [==============================] - 17s 47ms/step - loss: 0.6426 - accuracy: 0.7639 - val_loss: 0.6669 - val_accuracy: 0.7640 - lr: 5.0000e-04
Epoch 36/80
353/353 [==============================] - ETA: 0s - loss: 0.6342 - accuracy: 0.7652
Epoch 36: val_loss did not improve from 0.63767
353/353 [==============================] - 16s 47ms/step - loss: 0.6342 - accuracy: 0.7652 - val_loss: 0.6520 - val_accuracy: 0.7692 - lr: 5.0000e-04
Epoch 37/80
352/353 [============================>.] - ETA: 0s - loss: 0.6294 - accuracy: 0.7706
Epoch 37: val_loss improved from 0.63767 to 0.62739, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.6292 - accuracy: 0.7706 - val_loss: 0.6274 - val_accuracy: 0.7769 - lr: 5.0000e-04
Epoch 38/80
353/353 [==============================] - ETA: 0s - loss: 0.6098 - accuracy: 0.7767
Epoch 38: val_loss did not improve from 0.62739
353/353 [==============================] - 17s 47ms/step - loss: 0.6098 - accuracy: 0.7767 - val_loss: 0.6408 - val_accuracy: 0.7731 - lr: 5.0000e-04
Epoch 39/80
353/353 [==============================] - ETA: 0s - loss: 0.6062 - accuracy: 0.7773
Epoch 39: val_loss improved from 0.62739 to 0.61620, saving model to best-model.h5
353/353 [==============================] - 17s 47ms/step - loss: 0.6062 - accuracy: 0.7773 - val_loss: 0.6162 - val_accuracy: 0.7827 - lr: 5.0000e-04
Epoch 40/80
353/353 [==============================] - ETA: 0s - loss: 0.5910 - accuracy: 0.7863
Epoch 40: val_loss did not improve from 0.61620
353/353 [==============================] - 17s 47ms/step - loss: 0.5910 - accuracy: 0.7863 - val_loss: 0.6281 - val_accuracy: 0.7835 - lr: 5.0000e-04
Epoch 41/80
352/353 [============================>.] - ETA: 0s - loss: 0.5758 - accuracy: 0.7895
Epoch 41: val_loss improved from 0.61620 to 0.61442, saving model to best-model.h5
353/353 [==============================] - 17s 47ms/step - loss: 0.5757 - accuracy: 0.7895 - val_loss: 0.6144 - val_accuracy: 0.7878 - lr: 5.0000e-04
Epoch 42/80
352/353 [============================>.] - ETA: 0s - loss: 0.5774 - accuracy: 0.7897
Epoch 42: val_loss did not improve from 0.61442
353/353 [==============================] - 17s 47ms/step - loss: 0.5775 - accuracy: 0.7896 - val_loss: 0.6240 - val_accuracy: 0.7833 - lr: 5.0000e-04
Epoch 43/80
353/353 [==============================] - ETA: 0s - loss: 0.5667 - accuracy: 0.7936
Epoch 43: val_loss improved from 0.61442 to 0.60832, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.5667 - accuracy: 0.7936 - val_loss: 0.6083 - val_accuracy: 0.7912 - lr: 5.0000e-04
Epoch 44/80
352/353 [============================>.] - ETA: 0s - loss: 0.5603 - accuracy: 0.7945
Epoch 44: val_loss did not improve from 0.60832
353/353 [==============================] - 17s 47ms/step - loss: 0.5600 - accuracy: 0.7946 - val_loss: 0.6162 - val_accuracy: 0.7835 - lr: 5.0000e-04
Epoch 45/80
353/353 [==============================] - ETA: 0s - loss: 0.5484 - accuracy: 0.8005
Epoch 45: val_loss improved from 0.60832 to 0.58878, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.5484 - accuracy: 0.8005 - val_loss: 0.5888 - val_accuracy: 0.7988 - lr: 5.0000e-04
Epoch 46/80
353/353 [==============================] - ETA: 0s - loss: 0.5428 - accuracy: 0.8021
Epoch 46: val_loss improved from 0.58878 to 0.57702, saving model to best-model.h5
353/353 [==============================] - 17s 47ms/step - loss: 0.5428 - accuracy: 0.8021 - val_loss: 0.5770 - val_accuracy: 0.8014 - lr: 5.0000e-04
Epoch 47/80
352/353 [============================>.] - ETA: 0s - loss: 0.5332 - accuracy: 0.8066
Epoch 47: val_loss did not improve from 0.57702
353/353 [==============================] - 17s 47ms/step - loss: 0.5329 - accuracy: 0.8067 - val_loss: 0.5830 - val_accuracy: 0.8049 - lr: 5.0000e-04
Epoch 48/80
352/353 [============================>.] - ETA: 0s - loss: 0.5368 - accuracy: 0.8080
Epoch 48: val_loss did not improve from 0.57702
353/353 [==============================] - 17s 47ms/step - loss: 0.5371 - accuracy: 0.8079 - val_loss: 0.5834 - val_accuracy: 0.8063 - lr: 5.0000e-04
Epoch 49/80
352/353 [============================>.] - ETA: 0s - loss: 0.5211 - accuracy: 0.8126
Epoch 49: val_loss did not improve from 0.57702

Epoch 49: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.
353/353 [==============================] - 17s 48ms/step - loss: 0.5212 - accuracy: 0.8126 - val_loss: 0.5917 - val_accuracy: 0.8015 - lr: 5.0000e-04
Epoch 50/80
352/353 [============================>.] - ETA: 0s - loss: 0.4957 - accuracy: 0.8201
Epoch 50: val_loss improved from 0.57702 to 0.55834, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.4958 - accuracy: 0.8199 - val_loss: 0.5583 - val_accuracy: 0.8065 - lr: 1.0000e-04
Epoch 51/80
353/353 [==============================] - ETA: 0s - loss: 0.4757 - accuracy: 0.8284
Epoch 51: val_loss improved from 0.55834 to 0.55793, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.4757 - accuracy: 0.8284 - val_loss: 0.5579 - val_accuracy: 0.8115 - lr: 1.0000e-04
Epoch 52/80
352/353 [============================>.] - ETA: 0s - loss: 0.4715 - accuracy: 0.8307
Epoch 52: val_loss improved from 0.55793 to 0.54574, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.4712 - accuracy: 0.8308 - val_loss: 0.5457 - val_accuracy: 0.8170 - lr: 1.0000e-04
Epoch 53/80
352/353 [============================>.] - ETA: 0s - loss: 0.4669 - accuracy: 0.8331
Epoch 53: val_loss did not improve from 0.54574
353/353 [==============================] - 17s 47ms/step - loss: 0.4666 - accuracy: 0.8331 - val_loss: 0.5460 - val_accuracy: 0.8182 - lr: 1.0000e-04
Epoch 54/80
352/353 [============================>.] - ETA: 0s - loss: 0.4595 - accuracy: 0.8341
Epoch 54: val_loss did not improve from 0.54574
353/353 [==============================] - 17s 47ms/step - loss: 0.4596 - accuracy: 0.8341 - val_loss: 0.5543 - val_accuracy: 0.8190 - lr: 1.0000e-04
Epoch 55/80
353/353 [==============================] - ETA: 0s - loss: 0.4559 - accuracy: 0.8339
Epoch 55: val_loss did not improve from 0.54574

Epoch 55: ReduceLROnPlateau reducing learning rate to 2.0000000949949027e-05.
353/353 [==============================] - 17s 47ms/step - loss: 0.4559 - accuracy: 0.8339 - val_loss: 0.5494 - val_accuracy: 0.8184 - lr: 1.0000e-04
Epoch 56/80
353/353 [==============================] - ETA: 0s - loss: 0.4493 - accuracy: 0.8379
Epoch 56: val_loss did not improve from 0.54574
353/353 [==============================] - 17s 47ms/step - loss: 0.4493 - accuracy: 0.8379 - val_loss: 0.5466 - val_accuracy: 0.8188 - lr: 2.0000e-05
Epoch 57/80
353/353 [==============================] - ETA: 0s - loss: 0.4506 - accuracy: 0.8391
Epoch 57: val_loss did not improve from 0.54574
353/353 [==============================] - 19s 53ms/step - loss: 0.4506 - accuracy: 0.8391 - val_loss: 0.5473 - val_accuracy: 0.8198 - lr: 2.0000e-05
Epoch 58/80
352/353 [============================>.] - ETA: 0s - loss: 0.4419 - accuracy: 0.8412
Epoch 58: val_loss improved from 0.54574 to 0.54567, saving model to best-model.h5

Epoch 58: ReduceLROnPlateau reducing learning rate to 4.000000262749381e-06.
353/353 [==============================] - 18s 52ms/step - loss: 0.4424 - accuracy: 0.8411 - val_loss: 0.5457 - val_accuracy: 0.8196 - lr: 2.0000e-05
Epoch 59/80
353/353 [==============================] - ETA: 0s - loss: 0.4435 - accuracy: 0.8402
Epoch 59: val_loss did not improve from 0.54567
353/353 [==============================] - 19s 52ms/step - loss: 0.4435 - accuracy: 0.8402 - val_loss: 0.5461 - val_accuracy: 0.8212 - lr: 4.0000e-06
Epoch 60/80
353/353 [==============================] - ETA: 0s - loss: 0.4487 - accuracy: 0.8385
Epoch 60: val_loss did not improve from 0.54567
353/353 [==============================] - 19s 54ms/step - loss: 0.4487 - accuracy: 0.8385 - val_loss: 0.5457 - val_accuracy: 0.8210 - lr: 4.0000e-06
Epoch 61/80
353/353 [==============================] - ETA: 0s - loss: 0.4421 - accuracy: 0.8421
Epoch 61: val_loss improved from 0.54567 to 0.54562, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.4421 - accuracy: 0.8421 - val_loss: 0.5456 - val_accuracy: 0.8208 - lr: 4.0000e-06
Epoch 62/80
352/353 [============================>.] - ETA: 0s - loss: 0.4382 - accuracy: 0.8401
Restoring model weights from the end of the best epoch: 59.

Epoch 62: val_loss improved from 0.54562 to 0.54542, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 0.4379 - accuracy: 0.8402 - val_loss: 0.5454 - val_accuracy: 0.8212 - lr: 4.0000e-06
Epoch 62: early stopping
394/394 [==============================] - 2s 6ms/step - loss: 0.5669 - accuracy: 0.8199
Out[ ]:
[0.5668871402740479, 0.8199443817138672]
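The evaluate call above scores the in-memory model, whose weights EarlyStopping restored from the best epoch before stopping. Since ModelCheckpoint also wrote the lowest-val_loss epoch to best-model.h5, the checkpoint can be scored the same way (an aside, not in the original notebook):

best = keras.models.load_model('best-model.h5')
best.evaluate(x_test, y_test)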
Data Augmentation Results
In [ ]:
plot_model_history(history9, name="Final Combined Model with Data Augmentation")
Min validation loss: 0.5454151034355164 
Max validation loss: 2.0149643421173096 
Min validation acc: 0.15613825619220734 
Max validation acc: 0.8212157487869263

Final Model

The final model matches the Final Combined Model except that the fourth convolutional block's dropout is raised to 0.35 and the Adamax hyperparameters are written out explicitly.

In [23]:
def final_model():
  model = models.Sequential()

  # 1st convolutional layer
  model.add(layers.Conv2D(64, kernel_size=3, strides=1, padding='same', activation='relu', input_shape=(x_train.shape[1:])))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  model.add(layers.Dropout(0.3))

  # 2nd convolutional layer
  model.add(layers.Conv2D(128, kernel_size=5, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  model.add(layers.Dropout(0.3))

  # 3rd convolutional layer
  model.add(layers.Conv2D(512, kernel_size=3, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  model.add(layers.Dropout(0.3))

  # 4th convolutional layer
  model.add(layers.Conv2D(512, kernel_size=3, strides=1, padding='same', activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.AveragePooling2D((2, 2)))
  model.add(layers.Dropout(0.35))
  model.add(layers.Flatten())

  # 1st fully connected dense layer
  model.add(layers.Dense(256, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.Dropout(0.3))

  # 2nd fully connected dense layer
  model.add(layers.Dense(512, activation='relu'))
  model.add(layers.BatchNormalization())
  model.add(layers.Dropout(0.3))

  # final dense layer with number of classes
  model.add(layers.Dense(7, activation='softmax'))

  # compile the model (beta_1 and beta_2 are the Keras defaults, written out explicitly here)
  model.compile(optimizer=optimizers.Adamax(learning_rate=0.0005, beta_1=0.9, beta_2=0.999), loss='categorical_crossentropy', metrics=['accuracy'])
  return model
In [24]:
model = final_model()
save_figure("Final Model Architecture")
plot_model(model, to_file="model.png", show_shapes=True, show_layer_names=True)
Final Model Architecture has been saved
Out[24]:
<Figure size 432x288 with 0 Axes>
In [195]:
history10 = train_model(model, 30, 64)
model.evaluate(x_test, y_test)
Epoch 1/30
706/708 [============================>.] - ETA: 0s - loss: 2.1827 - accuracy: 0.2345
Epoch 1: val_loss improved from inf to 1.72933, saving model to best-model.h5
708/708 [==============================] - 20s 26ms/step - loss: 2.1823 - accuracy: 0.2346 - val_loss: 1.7293 - val_accuracy: 0.3385 - lr: 5.0000e-04
Epoch 2/30
707/708 [============================>.] - ETA: 0s - loss: 1.7687 - accuracy: 0.3559
Epoch 2: val_loss improved from 1.72933 to 1.42490, saving model to best-model.h5
708/708 [==============================] - 18s 26ms/step - loss: 1.7684 - accuracy: 0.3560 - val_loss: 1.4249 - val_accuracy: 0.4495 - lr: 5.0000e-04
Epoch 3/30
707/708 [============================>.] - ETA: 0s - loss: 1.4954 - accuracy: 0.4462
Epoch 3: val_loss improved from 1.42490 to 1.31612, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 1.4952 - accuracy: 0.4463 - val_loss: 1.3161 - val_accuracy: 0.5052 - lr: 5.0000e-04
Epoch 4/30
707/708 [============================>.] - ETA: 0s - loss: 1.3044 - accuracy: 0.5140
Epoch 4: val_loss improved from 1.31612 to 1.14273, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 1.3041 - accuracy: 0.5141 - val_loss: 1.1427 - val_accuracy: 0.5598 - lr: 5.0000e-04
Epoch 5/30
707/708 [============================>.] - ETA: 0s - loss: 1.1617 - accuracy: 0.5665
Epoch 5: val_loss improved from 1.14273 to 1.08128, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 1.1618 - accuracy: 0.5665 - val_loss: 1.0813 - val_accuracy: 0.5894 - lr: 5.0000e-04
Epoch 6/30
706/708 [============================>.] - ETA: 0s - loss: 1.0592 - accuracy: 0.6032
Epoch 6: val_loss improved from 1.08128 to 0.97144, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 1.0590 - accuracy: 0.6033 - val_loss: 0.9714 - val_accuracy: 0.6299 - lr: 5.0000e-04
Epoch 7/30
706/708 [============================>.] - ETA: 0s - loss: 0.9729 - accuracy: 0.6348
Epoch 7: val_loss improved from 0.97144 to 0.91135, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 0.9732 - accuracy: 0.6347 - val_loss: 0.9114 - val_accuracy: 0.6567 - lr: 5.0000e-04
Epoch 8/30
707/708 [============================>.] - ETA: 0s - loss: 0.9062 - accuracy: 0.6605
Epoch 8: val_loss improved from 0.91135 to 0.83739, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.9061 - accuracy: 0.6604 - val_loss: 0.8374 - val_accuracy: 0.6772 - lr: 5.0000e-04
Epoch 9/30
706/708 [============================>.] - ETA: 0s - loss: 0.8453 - accuracy: 0.6838
Epoch 9: val_loss improved from 0.83739 to 0.80060, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.8453 - accuracy: 0.6839 - val_loss: 0.8006 - val_accuracy: 0.7014 - lr: 5.0000e-04
Epoch 10/30
707/708 [============================>.] - ETA: 0s - loss: 0.7855 - accuracy: 0.7096
Epoch 10: val_loss improved from 0.80060 to 0.76742, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.7854 - accuracy: 0.7096 - val_loss: 0.7674 - val_accuracy: 0.7137 - lr: 5.0000e-04
Epoch 11/30
707/708 [============================>.] - ETA: 0s - loss: 0.7308 - accuracy: 0.7313
Epoch 11: val_loss improved from 0.76742 to 0.71960, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.7308 - accuracy: 0.7312 - val_loss: 0.7196 - val_accuracy: 0.7416 - lr: 5.0000e-04
Epoch 12/30
706/708 [============================>.] - ETA: 0s - loss: 0.6822 - accuracy: 0.7495
Epoch 12: val_loss did not improve from 0.71960
708/708 [==============================] - 19s 26ms/step - loss: 0.6821 - accuracy: 0.7495 - val_loss: 0.7401 - val_accuracy: 0.7342 - lr: 5.0000e-04
Epoch 13/30
707/708 [============================>.] - ETA: 0s - loss: 0.6331 - accuracy: 0.7664
Epoch 13: val_loss improved from 0.71960 to 0.67963, saving model to best-model.h5
708/708 [==============================] - 19s 26ms/step - loss: 0.6330 - accuracy: 0.7664 - val_loss: 0.6796 - val_accuracy: 0.7624 - lr: 5.0000e-04
Epoch 14/30
706/708 [============================>.] - ETA: 0s - loss: 0.5795 - accuracy: 0.7905
Epoch 14: val_loss improved from 0.67963 to 0.66225, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.5796 - accuracy: 0.7904 - val_loss: 0.6622 - val_accuracy: 0.7781 - lr: 5.0000e-04
Epoch 15/30
706/708 [============================>.] - ETA: 0s - loss: 0.5429 - accuracy: 0.8048
Epoch 15: val_loss improved from 0.66225 to 0.62798, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.5431 - accuracy: 0.8047 - val_loss: 0.6280 - val_accuracy: 0.7829 - lr: 5.0000e-04
Epoch 16/30
707/708 [============================>.] - ETA: 0s - loss: 0.5053 - accuracy: 0.8187
Epoch 16: val_loss did not improve from 0.62798
708/708 [==============================] - 19s 26ms/step - loss: 0.5053 - accuracy: 0.8188 - val_loss: 0.6430 - val_accuracy: 0.7870 - lr: 5.0000e-04
Epoch 17/30
707/708 [============================>.] - ETA: 0s - loss: 0.4703 - accuracy: 0.8307
Epoch 17: val_loss improved from 0.62798 to 0.61038, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.4705 - accuracy: 0.8307 - val_loss: 0.6104 - val_accuracy: 0.8049 - lr: 5.0000e-04
Epoch 18/30
707/708 [============================>.] - ETA: 0s - loss: 0.4470 - accuracy: 0.8394
Epoch 18: val_loss did not improve from 0.61038
708/708 [==============================] - 19s 26ms/step - loss: 0.4469 - accuracy: 0.8394 - val_loss: 0.6176 - val_accuracy: 0.8023 - lr: 5.0000e-04
Epoch 19/30
707/708 [============================>.] - ETA: 0s - loss: 0.4152 - accuracy: 0.8506
Epoch 19: val_loss improved from 0.61038 to 0.59460, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.4151 - accuracy: 0.8506 - val_loss: 0.5946 - val_accuracy: 0.8073 - lr: 5.0000e-04
Epoch 20/30
707/708 [============================>.] - ETA: 0s - loss: 0.3914 - accuracy: 0.8594
Epoch 20: val_loss did not improve from 0.59460
708/708 [==============================] - 19s 26ms/step - loss: 0.3912 - accuracy: 0.8594 - val_loss: 0.6092 - val_accuracy: 0.8127 - lr: 5.0000e-04
Epoch 21/30
707/708 [============================>.] - ETA: 0s - loss: 0.3671 - accuracy: 0.8684
Epoch 21: val_loss did not improve from 0.59460
708/708 [==============================] - 19s 26ms/step - loss: 0.3673 - accuracy: 0.8683 - val_loss: 0.6292 - val_accuracy: 0.8159 - lr: 5.0000e-04
Epoch 22/30
707/708 [============================>.] - ETA: 0s - loss: 0.3412 - accuracy: 0.8792
Epoch 22: val_loss did not improve from 0.59460

Epoch 22: ReduceLROnPlateau reducing learning rate to 0.00015811389051842542.
708/708 [==============================] - 19s 26ms/step - loss: 0.3411 - accuracy: 0.8793 - val_loss: 0.6189 - val_accuracy: 0.8161 - lr: 5.0000e-04
Epoch 23/30
707/708 [============================>.] - ETA: 0s - loss: 0.2853 - accuracy: 0.8980
Epoch 23: val_loss improved from 0.59460 to 0.59021, saving model to best-model.h5
708/708 [==============================] - 19s 27ms/step - loss: 0.2853 - accuracy: 0.8980 - val_loss: 0.5902 - val_accuracy: 0.8304 - lr: 1.5811e-04
Epoch 24/30
706/708 [============================>.] - ETA: 0s - loss: 0.2631 - accuracy: 0.9079
Epoch 24: val_loss did not improve from 0.59021
708/708 [==============================] - 19s 26ms/step - loss: 0.2633 - accuracy: 0.9078 - val_loss: 0.5907 - val_accuracy: 0.8306 - lr: 1.5811e-04
Epoch 25/30
706/708 [============================>.] - ETA: 0s - loss: 0.2465 - accuracy: 0.9132
Epoch 25: val_loss did not improve from 0.59021
708/708 [==============================] - 19s 26ms/step - loss: 0.2464 - accuracy: 0.9133 - val_loss: 0.5942 - val_accuracy: 0.8325 - lr: 1.5811e-04
Epoch 26/30
706/708 [============================>.] - ETA: 0s - loss: 0.2339 - accuracy: 0.9190
Epoch 26: val_loss did not improve from 0.59021

Epoch 26: ReduceLROnPlateau reducing learning rate to 5.0000003198030994e-05.
708/708 [==============================] - 19s 26ms/step - loss: 0.2339 - accuracy: 0.9189 - val_loss: 0.5952 - val_accuracy: 0.8329 - lr: 1.5811e-04
Epoch 27/30
708/708 [==============================] - ETA: 0s - loss: 0.2163 - accuracy: 0.9242
Epoch 27: val_loss did not improve from 0.59021
708/708 [==============================] - 19s 26ms/step - loss: 0.2163 - accuracy: 0.9242 - val_loss: 0.5976 - val_accuracy: 0.8335 - lr: 5.0000e-05
Epoch 28/30
707/708 [============================>.] - ETA: 0s - loss: 0.2110 - accuracy: 0.9256
Epoch 28: val_loss did not improve from 0.59021
708/708 [==============================] - 19s 26ms/step - loss: 0.2111 - accuracy: 0.9257 - val_loss: 0.6002 - val_accuracy: 0.8331 - lr: 5.0000e-05
Epoch 29/30
707/708 [============================>.] - ETA: 0s - loss: 0.2089 - accuracy: 0.9263
Epoch 29: val_loss did not improve from 0.59021

Epoch 29: ReduceLROnPlateau reducing learning rate to 1.5811389051842542e-05.
708/708 [==============================] - 19s 26ms/step - loss: 0.2089 - accuracy: 0.9263 - val_loss: 0.6052 - val_accuracy: 0.8345 - lr: 5.0000e-05
Epoch 30/30
706/708 [============================>.] - ETA: 0s - loss: 0.2001 - accuracy: 0.9296
Epoch 30: val_loss did not improve from 0.59021
708/708 [==============================] - 19s 26ms/step - loss: 0.2001 - accuracy: 0.9295 - val_loss: 0.6010 - val_accuracy: 0.8359 - lr: 1.5811e-05
394/394 [==============================] - 2s 6ms/step - loss: 0.6067 - accuracy: 0.8408
Out[195]:
[0.6066526174545288, 0.8407627940177917]
In [173]:
model.save('final-model.h5', overwrite=True)

Final Model with Data Augmentation

In [55]:
train_datagen = ImageDataGenerator(
    rotation_range=10, # randomly rotating images by up to 10 degrees
    zoom_range=0.1, # randomly zooming in or out by up to 10%
    width_shift_range=0.1, # randomly shifting images horizontally by up to 10% of their width
    height_shift_range=0.1, # randomly shifting images vertically by up to 10% of their height
    horizontal_flip=False, # no horizontal flipping
    vertical_flip=False, # no vertical flipping
    fill_mode='nearest') # fills newly exposed pixels with the nearest pixel values

plot_augmentation_samples(train_datagen) 
Data Augmentation Samples has been saved
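For context, the train_augmented_model helper defined earlier is what consumes this generator during training. Below is a minimal sketch of how such a helper typically wires a generator in, assuming x_train, y_train, x_val and y_val are in scope from the earlier split; the function name train_with_generator and the callback settings are illustrative assumptions, not the notebook's exact helper.

def train_with_generator(model, datagen, epochs, batch_size):
  # Augmented batches are produced on the fly by datagen.flow;
  # the validation data is passed through unaugmented.
  callbacks = [EarlyStopping(monitor='val_loss', patience=3,
                             restore_best_weights=True, verbose=1),
               ModelCheckpoint('best-model.h5', monitor='val_loss',
                               save_best_only=True, verbose=1)]
  return model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
                   validation_data=(x_val, y_val),
                   epochs=epochs,
                   callbacks=callbacks)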
In [25]:
model = final_model()

history11 = train_augmented_model(model, train_datagen, 80, 128)
model.evaluate(x_test, y_test)
Epoch 1/80
352/353 [============================>.] - ETA: 0s - loss: 2.3058 - accuracy: 0.1922
Epoch 1: val_loss improved from inf to 1.95658, saving model to best-model.h5
353/353 [==============================] - 30s 53ms/step - loss: 2.3053 - accuracy: 0.1923 - val_loss: 1.9566 - val_accuracy: 0.1522 - lr: 5.0000e-04
Epoch 2/80
352/353 [============================>.] - ETA: 0s - loss: 2.0774 - accuracy: 0.2315
Epoch 2: val_loss improved from 1.95658 to 1.76976, saving model to best-model.h5
353/353 [==============================] - 17s 47ms/step - loss: 2.0771 - accuracy: 0.2316 - val_loss: 1.7698 - val_accuracy: 0.3101 - lr: 5.0000e-04
Epoch 3/80
352/353 [============================>.] - ETA: 0s - loss: 1.9431 - accuracy: 0.2733
Epoch 3: val_loss improved from 1.76976 to 1.61022, saving model to best-model.h5
353/353 [==============================] - 17s 47ms/step - loss: 1.9431 - accuracy: 0.2732 - val_loss: 1.6102 - val_accuracy: 0.3834 - lr: 5.0000e-04
Epoch 4/80
353/353 [==============================] - ETA: 0s - loss: 1.7913 - accuracy: 0.3304
Epoch 4: val_loss improved from 1.61022 to 1.52418, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.7913 - accuracy: 0.3304 - val_loss: 1.5242 - val_accuracy: 0.4132 - lr: 5.0000e-04
Epoch 5/80
352/353 [============================>.] - ETA: 0s - loss: 1.6502 - accuracy: 0.3821
Epoch 5: val_loss improved from 1.52418 to 1.40886, saving model to best-model.h5
353/353 [==============================] - 17s 48ms/step - loss: 1.6503 - accuracy: 0.3820 - val_loss: 1.4089 - val_accuracy: 0.4716 - lr: 5.0000e-04
Epoch 6/80
353/353 [==============================] - ETA: 0s - loss: 1.5310 - accuracy: 0.4280
Epoch 6: val_loss improved from 1.40886 to 1.30394, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.5310 - accuracy: 0.4280 - val_loss: 1.3039 - val_accuracy: 0.5077 - lr: 5.0000e-04
Epoch 7/80
352/353 [============================>.] - ETA: 0s - loss: 1.4229 - accuracy: 0.4687
Epoch 7: val_loss improved from 1.30394 to 1.20467, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.4225 - accuracy: 0.4689 - val_loss: 1.2047 - val_accuracy: 0.5397 - lr: 5.0000e-04
Epoch 8/80
353/353 [==============================] - ETA: 0s - loss: 1.3379 - accuracy: 0.4969
Epoch 8: val_loss improved from 1.20467 to 1.12551, saving model to best-model.h5
353/353 [==============================] - 18s 50ms/step - loss: 1.3379 - accuracy: 0.4969 - val_loss: 1.1255 - val_accuracy: 0.5755 - lr: 5.0000e-04
Epoch 9/80
353/353 [==============================] - ETA: 0s - loss: 1.2678 - accuracy: 0.5248
Epoch 9: val_loss improved from 1.12551 to 1.06170, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.2678 - accuracy: 0.5248 - val_loss: 1.0617 - val_accuracy: 0.5975 - lr: 5.0000e-04
Epoch 10/80
352/353 [============================>.] - ETA: 0s - loss: 1.2087 - accuracy: 0.5457
Epoch 10: val_loss did not improve from 1.06170
353/353 [==============================] - 17s 48ms/step - loss: 1.2087 - accuracy: 0.5457 - val_loss: 1.1292 - val_accuracy: 0.5805 - lr: 5.0000e-04
Epoch 11/80
352/353 [============================>.] - ETA: 0s - loss: 1.1561 - accuracy: 0.5654
Epoch 11: val_loss improved from 1.06170 to 0.98079, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.1564 - accuracy: 0.5653 - val_loss: 0.9808 - val_accuracy: 0.6240 - lr: 5.0000e-04
Epoch 12/80
353/353 [==============================] - ETA: 0s - loss: 1.1107 - accuracy: 0.5822
Epoch 12: val_loss improved from 0.98079 to 0.95996, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.1107 - accuracy: 0.5822 - val_loss: 0.9600 - val_accuracy: 0.6299 - lr: 5.0000e-04
Epoch 13/80
353/353 [==============================] - ETA: 0s - loss: 1.0748 - accuracy: 0.5941
Epoch 13: val_loss improved from 0.95996 to 0.94190, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.0748 - accuracy: 0.5941 - val_loss: 0.9419 - val_accuracy: 0.6347 - lr: 5.0000e-04
Epoch 14/80
353/353 [==============================] - ETA: 0s - loss: 1.0377 - accuracy: 0.6087
Epoch 14: val_loss improved from 0.94190 to 0.88872, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 1.0377 - accuracy: 0.6087 - val_loss: 0.8887 - val_accuracy: 0.6563 - lr: 5.0000e-04
Epoch 15/80
352/353 [============================>.] - ETA: 0s - loss: 1.0084 - accuracy: 0.6194
Epoch 15: val_loss did not improve from 0.88872
353/353 [==============================] - 17s 48ms/step - loss: 1.0081 - accuracy: 0.6193 - val_loss: 0.8945 - val_accuracy: 0.6542 - lr: 5.0000e-04
Epoch 16/80
352/353 [============================>.] - ETA: 0s - loss: 0.9744 - accuracy: 0.6353
Epoch 16: val_loss improved from 0.88872 to 0.87977, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.9742 - accuracy: 0.6354 - val_loss: 0.8798 - val_accuracy: 0.6593 - lr: 5.0000e-04
Epoch 17/80
352/353 [============================>.] - ETA: 0s - loss: 0.9514 - accuracy: 0.6422
Epoch 17: val_loss improved from 0.87977 to 0.83846, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.9517 - accuracy: 0.6420 - val_loss: 0.8385 - val_accuracy: 0.6814 - lr: 5.0000e-04
Epoch 18/80
353/353 [==============================] - ETA: 0s - loss: 0.9304 - accuracy: 0.6532
Epoch 18: val_loss improved from 0.83846 to 0.83428, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.9304 - accuracy: 0.6532 - val_loss: 0.8343 - val_accuracy: 0.6828 - lr: 5.0000e-04
Epoch 19/80
353/353 [==============================] - ETA: 0s - loss: 0.9040 - accuracy: 0.6589
Epoch 19: val_loss improved from 0.83428 to 0.80227, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.9040 - accuracy: 0.6589 - val_loss: 0.8023 - val_accuracy: 0.6979 - lr: 5.0000e-04
Epoch 20/80
352/353 [============================>.] - ETA: 0s - loss: 0.8826 - accuracy: 0.6680
Epoch 20: val_loss did not improve from 0.80227
353/353 [==============================] - 17s 49ms/step - loss: 0.8823 - accuracy: 0.6682 - val_loss: 0.8178 - val_accuracy: 0.6885 - lr: 5.0000e-04
Epoch 21/80
353/353 [==============================] - ETA: 0s - loss: 0.8669 - accuracy: 0.6785
Epoch 21: val_loss improved from 0.80227 to 0.78965, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.8669 - accuracy: 0.6785 - val_loss: 0.7897 - val_accuracy: 0.6983 - lr: 5.0000e-04
Epoch 22/80
352/353 [============================>.] - ETA: 0s - loss: 0.8434 - accuracy: 0.6842
Epoch 22: val_loss improved from 0.78965 to 0.76581, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.8434 - accuracy: 0.6843 - val_loss: 0.7658 - val_accuracy: 0.7096 - lr: 5.0000e-04
Epoch 23/80
352/353 [============================>.] - ETA: 0s - loss: 0.8268 - accuracy: 0.6914
Epoch 23: val_loss did not improve from 0.76581
353/353 [==============================] - 17s 49ms/step - loss: 0.8271 - accuracy: 0.6912 - val_loss: 0.7744 - val_accuracy: 0.7076 - lr: 5.0000e-04
Epoch 24/80
352/353 [============================>.] - ETA: 0s - loss: 0.8137 - accuracy: 0.6956
Epoch 24: val_loss improved from 0.76581 to 0.74595, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.8136 - accuracy: 0.6957 - val_loss: 0.7459 - val_accuracy: 0.7133 - lr: 5.0000e-04
Epoch 25/80
353/353 [==============================] - ETA: 0s - loss: 0.7925 - accuracy: 0.7040
Epoch 25: val_loss improved from 0.74595 to 0.73900, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.7925 - accuracy: 0.7040 - val_loss: 0.7390 - val_accuracy: 0.7239 - lr: 5.0000e-04
Epoch 26/80
353/353 [==============================] - ETA: 0s - loss: 0.7817 - accuracy: 0.7080
Epoch 26: val_loss improved from 0.73900 to 0.70486, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.7817 - accuracy: 0.7080 - val_loss: 0.7049 - val_accuracy: 0.7366 - lr: 5.0000e-04
Epoch 27/80
353/353 [==============================] - ETA: 0s - loss: 0.7662 - accuracy: 0.7174
Epoch 27: val_loss did not improve from 0.70486
353/353 [==============================] - 17s 48ms/step - loss: 0.7662 - accuracy: 0.7174 - val_loss: 0.7311 - val_accuracy: 0.7282 - lr: 5.0000e-04
Epoch 28/80
353/353 [==============================] - ETA: 0s - loss: 0.7535 - accuracy: 0.7222
Epoch 28: val_loss improved from 0.70486 to 0.69906, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.7535 - accuracy: 0.7222 - val_loss: 0.6991 - val_accuracy: 0.7412 - lr: 5.0000e-04
Epoch 29/80
353/353 [==============================] - ETA: 0s - loss: 0.7393 - accuracy: 0.7262
Epoch 29: val_loss improved from 0.69906 to 0.67862, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.7393 - accuracy: 0.7262 - val_loss: 0.6786 - val_accuracy: 0.7505 - lr: 5.0000e-04
Epoch 30/80
352/353 [============================>.] - ETA: 0s - loss: 0.7283 - accuracy: 0.7308
Epoch 30: val_loss did not improve from 0.67862
353/353 [==============================] - 17s 49ms/step - loss: 0.7285 - accuracy: 0.7309 - val_loss: 0.6934 - val_accuracy: 0.7477 - lr: 5.0000e-04
Epoch 31/80
353/353 [==============================] - ETA: 0s - loss: 0.7135 - accuracy: 0.7375
Epoch 31: val_loss improved from 0.67862 to 0.66868, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.7135 - accuracy: 0.7375 - val_loss: 0.6687 - val_accuracy: 0.7559 - lr: 5.0000e-04
Epoch 32/80
352/353 [============================>.] - ETA: 0s - loss: 0.7002 - accuracy: 0.7433
Epoch 32: val_loss did not improve from 0.66868
353/353 [==============================] - 17s 48ms/step - loss: 0.7001 - accuracy: 0.7433 - val_loss: 0.6718 - val_accuracy: 0.7553 - lr: 5.0000e-04
Epoch 33/80
352/353 [============================>.] - ETA: 0s - loss: 0.6916 - accuracy: 0.7460
Epoch 33: val_loss did not improve from 0.66868
353/353 [==============================] - 17s 48ms/step - loss: 0.6917 - accuracy: 0.7459 - val_loss: 0.6844 - val_accuracy: 0.7497 - lr: 5.0000e-04
Epoch 34/80
353/353 [==============================] - ETA: 0s - loss: 0.6759 - accuracy: 0.7500
Epoch 34: val_loss improved from 0.66868 to 0.66164, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.6759 - accuracy: 0.7500 - val_loss: 0.6616 - val_accuracy: 0.7628 - lr: 5.0000e-04
Epoch 35/80
352/353 [============================>.] - ETA: 0s - loss: 0.6674 - accuracy: 0.7563
Epoch 35: val_loss did not improve from 0.66164
353/353 [==============================] - 17s 48ms/step - loss: 0.6674 - accuracy: 0.7563 - val_loss: 0.6646 - val_accuracy: 0.7569 - lr: 5.0000e-04
Epoch 36/80
353/353 [==============================] - ETA: 0s - loss: 0.6583 - accuracy: 0.7579
Epoch 36: val_loss did not improve from 0.66164
353/353 [==============================] - 17s 48ms/step - loss: 0.6583 - accuracy: 0.7579 - val_loss: 0.6658 - val_accuracy: 0.7640 - lr: 5.0000e-04
Epoch 37/80
352/353 [============================>.] - ETA: 0s - loss: 0.6486 - accuracy: 0.7610
Epoch 37: val_loss improved from 0.66164 to 0.65705, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.6487 - accuracy: 0.7609 - val_loss: 0.6571 - val_accuracy: 0.7666 - lr: 5.0000e-04
Epoch 38/80
352/353 [============================>.] - ETA: 0s - loss: 0.6397 - accuracy: 0.7679
Epoch 38: val_loss improved from 0.65705 to 0.63274, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.6399 - accuracy: 0.7679 - val_loss: 0.6327 - val_accuracy: 0.7821 - lr: 5.0000e-04
Epoch 39/80
353/353 [==============================] - ETA: 0s - loss: 0.6335 - accuracy: 0.7684
Epoch 39: val_loss did not improve from 0.63274
353/353 [==============================] - 17s 48ms/step - loss: 0.6335 - accuracy: 0.7684 - val_loss: 0.6400 - val_accuracy: 0.7787 - lr: 5.0000e-04
Epoch 40/80
352/353 [============================>.] - ETA: 0s - loss: 0.6194 - accuracy: 0.7728
Epoch 40: val_loss did not improve from 0.63274
353/353 [==============================] - 17s 48ms/step - loss: 0.6194 - accuracy: 0.7727 - val_loss: 0.6500 - val_accuracy: 0.7710 - lr: 5.0000e-04
Epoch 41/80
353/353 [==============================] - ETA: 0s - loss: 0.6128 - accuracy: 0.7748
Restoring model weights from the end of the best epoch: 38.

Epoch 41: val_loss improved from 0.63274 to 0.62615, saving model to best-model.h5
353/353 [==============================] - 17s 49ms/step - loss: 0.6128 - accuracy: 0.7748 - val_loss: 0.6261 - val_accuracy: 0.7789 - lr: 5.0000e-04
Epoch 41: early stopping
394/394 [==============================] - 3s 6ms/step - loss: 0.6214 - accuracy: 0.7894
Out[25]:
[0.6213833093643188, 0.7894318699836731]
In [26]:
model.save('augmented-model.h5', overwrite=True)

Final Model Results

In [196]:
plot_model_history(history10, name="Final CNN Model Test")
Min validation loss: 0.590213418006897 
Max validation loss: 1.729331135749817 
Min validation acc: 0.33849820494651794 
Max validation acc: 0.835915744304657
In [ ]:
plot_model_history(history10, name="Final CNN Model")
Min validation loss: 0.5459099411964417 
Max validation loss: 1.7715320587158203 
Min validation acc: 0.3178386986255646 
Max validation acc: 0.851807713508606
In [27]:
plot_model_history(history11, name="Final CNN Model with Data Augmentation")
Min validation loss: 0.6261454224586487 
Max validation loss: 1.9565800428390503 
Min validation acc: 0.1521652787923813 
Max validation acc: 0.7820818424224854
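The plot_model_history helper is defined earlier in the notebook; as a rough sketch of what such a helper does (the figure layout below is an illustrative assumption), it plots the training and validation curves from the Keras History object and prints the extremes summarized above.

def plot_history_sketch(history, name):
  # Loss and accuracy curves, training vs. validation, side by side
  fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))
  ax1.plot(history.history['loss'], label='loss')
  ax1.plot(history.history['val_loss'], label='val_loss')
  ax1.set_title(name + ": Loss")
  ax1.legend()
  ax2.plot(history.history['accuracy'], label='accuracy')
  ax2.plot(history.history['val_accuracy'], label='val_accuracy')
  ax2.set_title(name + ": Accuracy")
  ax2.legend()
  print("Min validation loss:", min(history.history['val_loss']))
  print("Max validation acc:", max(history.history['val_accuracy']))
  plt.show()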

Predictions for Model without Data Augmentation

In [31]:
# model = tf.keras.models.load_model('final-model.h5')
model = tf.keras.models.load_model('/content/drive/MyDrive/FER H5/final-model.h5') # loading the best model, which was saved to Google Drive

data = []
predicted = np.argmax(model.predict(x_test), axis=1) # predicted class index for each test image
actual = np.argmax(y_test, axis=1) # decode the one-hot labels back to class indices
validate = []
table = PrettyTable(['Predicted Emotion', 'Actual Emotion', 'Predicted Emotion Text', 'Actual Emotion Text', 'Validate'])

for i in range(50): # inspect the first 50 test images
  validate.append('True' if predicted[i] == actual[i] else 'False')
  data.append([predicted[i],
               actual[i],
               decoded_emotions[predicted[i]],
               decoded_emotions[actual[i]],
               validate[i]])
for record in data:
  table.add_row(record)
print(table)

true_predictions = validate.count('True')
print(f"\nNumber of true predictions: {true_predictions}/{len(validate)}")
+-------------------+----------------+------------------------+---------------------+----------+
| Predicted Emotion | Actual Emotion | Predicted Emotion Text | Actual Emotion Text | Validate |
+-------------------+----------------+------------------------+---------------------+----------+
|         6         |       6        |        Neutral         |       Neutral       |   True   |
|         5         |       5        |        Suprised        |       Suprised      |   True   |
|         5         |       5        |        Suprised        |       Suprised      |   True   |
|         6         |       6        |        Neutral         |       Neutral       |   True   |
|         1         |       1        |       Disgusted        |      Disgusted      |   True   |
|         0         |       0        |         Angry          |        Angry        |   True   |
|         3         |       3        |         Happy          |        Happy        |   True   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         1         |       1        |       Disgusted        |      Disgusted      |   True   |
|         3         |       3        |         Happy          |        Happy        |   True   |
|         5         |       5        |        Suprised        |       Suprised      |   True   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         5         |       5        |        Suprised        |       Suprised      |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         6         |       0        |        Neutral         |        Angry        |  False   |
|         1         |       1        |       Disgusted        |      Disgusted      |   True   |
|         3         |       3        |         Happy          |        Happy        |   True   |
|         6         |       6        |        Neutral         |       Neutral       |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         3         |       3        |         Happy          |        Happy        |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         6         |       6        |        Neutral         |       Neutral       |   True   |
|         6         |       6        |        Neutral         |       Neutral       |   True   |
|         6         |       6        |        Neutral         |       Neutral       |   True   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         0         |       0        |         Angry          |        Angry        |   True   |
|         5         |       5        |        Suprised        |       Suprised      |   True   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         3         |       6        |         Happy          |       Neutral       |  False   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         6         |       6        |        Neutral         |       Neutral       |   True   |
|         5         |       5        |        Suprised        |       Suprised      |   True   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         3         |       3        |         Happy          |        Happy        |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         4         |       2        |          Sad           |        Scared       |  False   |
|         0         |       0        |         Angry          |        Angry        |   True   |
|         4         |       6        |          Sad           |       Neutral       |  False   |
|         5         |       5        |        Suprised        |       Suprised      |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         6         |       6        |        Neutral         |       Neutral       |   True   |
|         1         |       1        |       Disgusted        |      Disgusted      |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
+-------------------+----------------+------------------------+---------------------+----------+

Number of true predictions: 46/50
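The loop-and-count above can also be done in one vectorized step on the arrays already computed; an equivalent check:

# Vectorized equivalent of counting 'True' entries in validate
print(f"Number of true predictions: {int(np.sum(predicted[:50] == actual[:50]))}/50")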
In [ ]:
fig = plt.figure(figsize=(25, 15))
fig.suptitle("Predictions vs Truth", fontsize=20, weight='bold')
rows = 5
columns = 10

for i in range(rows * columns):
  fig.add_subplot(rows, columns, i+1)
  plt.grid(False)
  plt.imshow(np.squeeze(x_test[i]), cmap=plt.cm.gray)
  plt.xticks([])
  plt.yticks([])
  color = 'g' if predicted[i] == actual[i] else 'r' # green for correct predictions, red for misses
  plt.title("Predicted: " + decoded_emotions[predicted[i]], color=color)
  plt.xlabel("Actual: " + decoded_emotions[actual[i]], color='b')
save_figure("Predictions vs Truth", tight_layout=False)
plt.show()
Predictions vs Truth has been saved
In [33]:
print(classification_report(actual, predicted, target_names=decoded_emotions.values()))
              precision    recall  f1-score   support

       Angry       0.80      0.84      0.82      1770
   Disgusted       0.98      1.00      0.99      1778
      Scared       0.83      0.77      0.80      1775
       Happy       0.86      0.83      0.84      1872
         Sad       0.73      0.70      0.72      1825
    Suprised       0.91      0.95      0.93      1755
     Neutral       0.75      0.79      0.77      1810

    accuracy                           0.84     12585
   macro avg       0.84      0.84      0.84     12585
weighted avg       0.84      0.84      0.84     12585
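The averages at the bottom of the report can be cross-checked directly with the f1_score import from earlier: macro averaging takes the unweighted mean over classes, while weighted averaging weights each class by its support.

# Cross-check the report's bottom rows
print("Macro F1:   ", f1_score(actual, predicted, average='macro'))
print("Weighted F1:", f1_score(actual, predicted, average='weighted'))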

In [34]:
conf_matrix = tf.math.confusion_matrix(actual, predicted) # named conf_matrix to avoid shadowing sklearn's confusion_matrix import
plt.figure(figsize=(10, 8))
sns.heatmap(conf_matrix, annot=True, 
            cmap='Blues', fmt='d', 
            xticklabels=decoded_emotions.values(),
            yticklabels=decoded_emotions.values())
plt.xlabel('Predicted')
plt.ylabel('Actual')
save_figure("Confusion Matrix")
Confusion Matrix has been saved
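Per-class recall can be read straight off the heatmap: the diagonal entry divided by its row sum. A short sketch using the conf_matrix tensor computed above (assuming decoded_emotions iterates in class-index order, as elsewhere in the notebook):

cm = conf_matrix.numpy() # convert the TF tensor to a NumPy array
for emotion, recall in zip(decoded_emotions.values(), cm.diagonal() / cm.sum(axis=1)):
  print(f"{emotion}: {recall:.2f}")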

Predictions for Model with Data Augmentation

In [28]:
model = tf.keras.models.load_model('augmented-model.h5')

data = []
predicted = np.argmax(model.predict(x_test), axis=1) # predicted class index for each test image
actual = np.argmax(y_test, axis=1) # decode the one-hot labels back to class indices
validate = []
table = PrettyTable(['Predicted Emotion', 'Actual Emotion', 'Predicted Emotion Text', 'Actual Emotion Text', 'Validate'])

for i in range(50): # inspect the first 50 test images
  validate.append('True' if predicted[i] == actual[i] else 'False')
  data.append([predicted[i],
               actual[i],
               decoded_emotions[predicted[i]],
               decoded_emotions[actual[i]],
               validate[i]])
for record in data:
  table.add_row(record)
print(table)

true_predictions = validate.count('True')
print(f"\nNumber of true predictions: {true_predictions}/{len(validate)}")
+-------------------+----------------+------------------------+---------------------+----------+
| Predicted Emotion | Actual Emotion | Predicted Emotion Text | Actual Emotion Text | Validate |
+-------------------+----------------+------------------------+---------------------+----------+
|         3         |       6        |         Happy          |       Neutral       |  False   |
|         5         |       5        |        Suprised        |       Suprised      |   True   |
|         0         |       5        |         Angry          |       Suprised      |  False   |
|         6         |       6        |        Neutral         |       Neutral       |   True   |
|         1         |       1        |       Disgusted        |      Disgusted      |   True   |
|         0         |       0        |         Angry          |        Angry        |   True   |
|         3         |       3        |         Happy          |        Happy        |   True   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         1         |       1        |       Disgusted        |      Disgusted      |   True   |
|         6         |       3        |        Neutral         |        Happy        |  False   |
|         5         |       5        |        Suprised        |       Suprised      |   True   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         3         |       2        |         Happy          |        Scared       |  False   |
|         6         |       5        |        Neutral         |       Suprised      |  False   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         6         |       0        |        Neutral         |        Angry        |  False   |
|         1         |       1        |       Disgusted        |      Disgusted      |   True   |
|         3         |       3        |         Happy          |        Happy        |   True   |
|         6         |       6        |        Neutral         |       Neutral       |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         3         |       3        |         Happy          |        Happy        |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         6         |       6        |        Neutral         |       Neutral       |   True   |
|         6         |       6        |        Neutral         |       Neutral       |   True   |
|         3         |       6        |         Happy          |       Neutral       |  False   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         0         |       0        |         Angry          |        Angry        |   True   |
|         2         |       5        |         Scared         |       Suprised      |  False   |
|         2         |       4        |         Scared         |         Sad         |  False   |
|         3         |       6        |         Happy          |       Neutral       |  False   |
|         0         |       4        |         Angry          |         Sad         |  False   |
|         6         |       6        |        Neutral         |       Neutral       |   True   |
|         5         |       5        |        Suprised        |       Suprised      |   True   |
|         4         |       4        |          Sad           |         Sad         |   True   |
|         3         |       3        |         Happy          |        Happy        |   True   |
|         4         |       2        |          Sad           |        Scared       |  False   |
|         6         |       4        |        Neutral         |         Sad         |  False   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         0         |       0        |         Angry          |        Angry        |   True   |
|         0         |       6        |         Angry          |       Neutral       |  False   |
|         5         |       5        |        Suprised        |       Suprised      |   True   |
|         2         |       2        |         Scared         |        Scared       |   True   |
|         6         |       2        |        Neutral         |        Scared       |  False   |
|         6         |       6        |        Neutral         |       Neutral       |   True   |
|         1         |       1        |       Disgusted        |      Disgusted      |   True   |
|         5         |       2        |        Suprised        |        Scared       |  False   |
+-------------------+----------------+------------------------+---------------------+----------+

Number of true predictions: 34/50
In [29]:
fig = plt.figure(figsize=(25, 15))
fig.suptitle("Predictions vs Truth", fontsize=20, weight='bold')
rows = 5
columns = 10

for i in range(rows * columns):
  fig.add_subplot(rows, columns, i+1)
  plt.grid(False)
  plt.imshow(np.squeeze(x_test[i]), cmap=plt.cm.gray)
  plt.xticks([])
  plt.yticks([])
  color = 'g' if predicted[i] == actual[i] else 'r' # green for correct predictions, red for misses
  plt.title("Predicted: " + decoded_emotions[predicted[i]], color=color)
  plt.xlabel("Actual: " + decoded_emotions[actual[i]], color='b')
save_figure("Predictions vs Truth", tight_layout=False)
plt.show()
Predictions vs Truth has been saved
In [30]:
print(classification_report(actual, predicted, target_names=decoded_emotions.values()))
              precision    recall  f1-score   support

       Angry       0.74      0.78      0.76      1770
   Disgusted       0.99      1.00      0.99      1778
      Scared       0.75      0.64      0.69      1775
       Happy       0.87      0.83      0.85      1872
         Sad       0.70      0.59      0.64      1825
    Suprised       0.85      0.92      0.88      1755
     Neutral       0.64      0.77      0.70      1810

    accuracy                           0.79     12585
   macro avg       0.79      0.79      0.79     12585
weighted avg       0.79      0.79      0.79     12585

In [31]:
conf_matrix = tf.math.confusion_matrix(actual, predicted) # named conf_matrix to avoid shadowing sklearn's confusion_matrix import
plt.figure(figsize=(10, 8))
sns.heatmap(conf_matrix, annot=True, 
            cmap='Blues', fmt='d', 
            xticklabels=decoded_emotions.values(),
            yticklabels=decoded_emotions.values())
plt.xlabel('Predicted')
plt.ylabel('Actual')
save_figure("Confusion Matrix")
Confusion Matrix has been saved

Saving the Model

In [32]:
fer_to_json = model.to_json()  
with open("best-model.json", "w") as json_file:  
    json_file.write(fer_to_json)  
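Note that to_json() serializes only the architecture; the learned weights live in the .h5 files saved earlier. A minimal sketch of restoring the model from both pieces (the weights path is assumed to be one of the .h5 checkpoints saved above):

from keras.models import model_from_json

# Rebuild the architecture from JSON, then attach the trained weights
with open("best-model.json", "r") as json_file:
  restored_model = model_from_json(json_file.read())
restored_model.load_weights("augmented-model.h5") # weights saved earlier for this model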

Conclusions

+------------------------------------+-----------------+---------------------+------------------------------------------+-------------+
| Model                              | Validation Loss | Validation Accuracy | True Predictions on First 50 Test Images | Best Model? |
+------------------------------------+-----------------+---------------------+------------------------------------------+-------------+
| Final Model                        | 0.5459          | 0.8518              | 46/50                                    | Yes         |
| Final Model with Data Augmentation | 0.6261          | 0.7821              | 34/50                                    | No          |
+------------------------------------+-----------------+---------------------+------------------------------------------+-------------+

The final model without data augmentation achieved the lower validation loss and the higher validation accuracy, so it was selected as the best model.